From 13102dee16e18947eec8a9493200092abefaca22 Mon Sep 17 00:00:00 2001 From: Mason Daugherty Date: Wed, 8 Oct 2025 23:30:30 -0400 Subject: [PATCH 1/8] chore(core): delete `memory.py` --- libs/core/langchain_core/memory.py | 120 ----------------------------- 1 file changed, 120 deletions(-) delete mode 100644 libs/core/langchain_core/memory.py diff --git a/libs/core/langchain_core/memory.py b/libs/core/langchain_core/memory.py deleted file mode 100644 index bd6ac278808fc..0000000000000 --- a/libs/core/langchain_core/memory.py +++ /dev/null @@ -1,120 +0,0 @@ -"""**Memory** maintains Chain state, incorporating context from past runs. - -This module contains memory abstractions from LangChain v0.0.x. - -These abstractions are now deprecated and will be removed in LangChain v1.0.0. -""" - -from __future__ import annotations - -from abc import ABC, abstractmethod -from typing import Any - -from pydantic import ConfigDict - -from langchain_core._api import deprecated -from langchain_core.load.serializable import Serializable -from langchain_core.runnables import run_in_executor - - -@deprecated( - since="0.3.3", - removal="1.0.0", - message=( - "Please see the migration guide at: " - "https://python.langchain.com/docs/versions/migrating_memory/" - ), -) -class BaseMemory(Serializable, ABC): - """Abstract base class for memory in Chains. - - Memory refers to state in Chains. Memory can be used to store information about - past executions of a Chain and inject that information into the inputs of - future executions of the Chain. For example, for conversational Chains Memory - can be used to store conversations and automatically add them to future model - prompts so that the model has the necessary context to respond coherently to - the latest input. - - Example: - .. code-block:: python - - class SimpleMemory(BaseMemory): - memories: dict[str, Any] = dict() - - @property - def memory_variables(self) -> list[str]: - return list(self.memories.keys()) - - def load_memory_variables( - self, inputs: dict[str, Any] - ) -> dict[str, str]: - return self.memories - - def save_context( - self, inputs: dict[str, Any], outputs: dict[str, str] - ) -> None: - pass - - def clear(self) -> None: - pass - - """ - - model_config = ConfigDict( - arbitrary_types_allowed=True, - ) - - @property - @abstractmethod - def memory_variables(self) -> list[str]: - """The string keys this memory class will add to chain inputs.""" - - @abstractmethod - def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: - """Return key-value pairs given the text input to the chain. - - Args: - inputs: The inputs to the chain. - - Returns: - A dictionary of key-value pairs. - """ - - async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: - """Async return key-value pairs given the text input to the chain. - - Args: - inputs: The inputs to the chain. - - Returns: - A dictionary of key-value pairs. - """ - return await run_in_executor(None, self.load_memory_variables, inputs) - - @abstractmethod - def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: - """Save the context of this chain run to memory. - - Args: - inputs: The inputs to the chain. - outputs: The outputs of the chain. - """ - - async def asave_context( - self, inputs: dict[str, Any], outputs: dict[str, str] - ) -> None: - """Async save the context of this chain run to memory. - - Args: - inputs: The inputs to the chain. - outputs: The outputs of the chain. 
- """ - await run_in_executor(None, self.save_context, inputs, outputs) - - @abstractmethod - def clear(self) -> None: - """Clear memory contents.""" - - async def aclear(self) -> None: - """Async clear memory contents.""" - await run_in_executor(None, self.clear) From c0b4ca41128ff2f5b1409b553e0e670a2ffd8384 Mon Sep 17 00:00:00 2001 From: Mason Daugherty Date: Wed, 8 Oct 2025 23:34:02 -0400 Subject: [PATCH 2/8] more --- .../langchain_classic/memory/buffer.py | 179 ----- .../langchain_classic/memory/buffer_window.py | 62 -- .../langchain_classic/memory/chat_memory.py | 104 --- .../langchain_classic/memory/entity.py | 614 ------------------ .../langchain_classic/memory/summary.py | 171 ----- .../memory/summary_buffer.py | 151 ----- .../langchain_classic/memory/token_buffer.py | 74 --- .../langchain_classic/memory/vectorstore.py | 122 ---- 8 files changed, 1477 deletions(-) delete mode 100644 libs/langchain/langchain_classic/memory/buffer.py delete mode 100644 libs/langchain/langchain_classic/memory/buffer_window.py delete mode 100644 libs/langchain/langchain_classic/memory/chat_memory.py delete mode 100644 libs/langchain/langchain_classic/memory/entity.py delete mode 100644 libs/langchain/langchain_classic/memory/summary.py delete mode 100644 libs/langchain/langchain_classic/memory/summary_buffer.py delete mode 100644 libs/langchain/langchain_classic/memory/token_buffer.py delete mode 100644 libs/langchain/langchain_classic/memory/vectorstore.py diff --git a/libs/langchain/langchain_classic/memory/buffer.py b/libs/langchain/langchain_classic/memory/buffer.py deleted file mode 100644 index c356b70da08c3..0000000000000 --- a/libs/langchain/langchain_classic/memory/buffer.py +++ /dev/null @@ -1,179 +0,0 @@ -from typing import Any - -from langchain_core._api import deprecated -from langchain_core.memory import BaseMemory -from langchain_core.messages import BaseMessage, get_buffer_string -from langchain_core.utils import pre_init -from typing_extensions import override - -from langchain_classic.memory.chat_memory import BaseChatMemory -from langchain_classic.memory.utils import get_prompt_input_key - - -@deprecated( - since="0.3.1", - removal="1.0.0", - message=( - "Please see the migration guide at: " - "https://python.langchain.com/docs/versions/migrating_memory/" - ), -) -class ConversationBufferMemory(BaseChatMemory): - """A basic memory implementation that simply stores the conversation history. - - This stores the entire conversation history in memory without any - additional processing. - - Note that additional processing may be required in some situations when the - conversation history is too large to fit in the context window of the model. 
- """ - - human_prefix: str = "Human" - ai_prefix: str = "AI" - memory_key: str = "history" #: :meta private: - - @property - def buffer(self) -> Any: - """String buffer of memory.""" - return self.buffer_as_messages if self.return_messages else self.buffer_as_str - - async def abuffer(self) -> Any: - """String buffer of memory.""" - return ( - await self.abuffer_as_messages() - if self.return_messages - else await self.abuffer_as_str() - ) - - def _buffer_as_str(self, messages: list[BaseMessage]) -> str: - return get_buffer_string( - messages, - human_prefix=self.human_prefix, - ai_prefix=self.ai_prefix, - ) - - @property - def buffer_as_str(self) -> str: - """Exposes the buffer as a string in case return_messages is True.""" - return self._buffer_as_str(self.chat_memory.messages) - - async def abuffer_as_str(self) -> str: - """Exposes the buffer as a string in case return_messages is True.""" - messages = await self.chat_memory.aget_messages() - return self._buffer_as_str(messages) - - @property - def buffer_as_messages(self) -> list[BaseMessage]: - """Exposes the buffer as a list of messages in case return_messages is False.""" - return self.chat_memory.messages - - async def abuffer_as_messages(self) -> list[BaseMessage]: - """Exposes the buffer as a list of messages in case return_messages is False.""" - return await self.chat_memory.aget_messages() - - @property - def memory_variables(self) -> list[str]: - """Will always return list of memory variables. - - :meta private: - """ - return [self.memory_key] - - @override - def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: - """Return history buffer.""" - return {self.memory_key: self.buffer} - - @override - async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: - """Return key-value pairs given the text input to the chain.""" - buffer = await self.abuffer() - return {self.memory_key: buffer} - - -@deprecated( - since="0.3.1", - removal="1.0.0", - message=( - "Please see the migration guide at: " - "https://python.langchain.com/docs/versions/migrating_memory/" - ), -) -class ConversationStringBufferMemory(BaseMemory): - """A basic memory implementation that simply stores the conversation history. - - This stores the entire conversation history in memory without any - additional processing. - - Equivalent to ConversationBufferMemory but tailored more specifically - for string-based conversations rather than chat models. - - Note that additional processing may be required in some situations when the - conversation history is too large to fit in the context window of the model. - """ - - human_prefix: str = "Human" - ai_prefix: str = "AI" - """Prefix to use for AI generated responses.""" - buffer: str = "" - output_key: str | None = None - input_key: str | None = None - memory_key: str = "history" #: :meta private: - - @pre_init - def validate_chains(cls, values: dict) -> dict: - """Validate that return messages is not True.""" - if values.get("return_messages", False): - msg = "return_messages must be False for ConversationStringBufferMemory" - raise ValueError(msg) - return values - - @property - def memory_variables(self) -> list[str]: - """Will always return list of memory variables. 
- - :meta private: - """ - return [self.memory_key] - - @override - def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]: - """Return history buffer.""" - return {self.memory_key: self.buffer} - - async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]: - """Return history buffer.""" - return self.load_memory_variables(inputs) - - def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: - """Save context from this conversation to buffer.""" - if self.input_key is None: - prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) - else: - prompt_input_key = self.input_key - if self.output_key is None: - if len(outputs) != 1: - msg = f"One output key expected, got {outputs.keys()}" - raise ValueError(msg) - output_key = next(iter(outputs.keys())) - else: - output_key = self.output_key - human = f"{self.human_prefix}: " + inputs[prompt_input_key] - ai = f"{self.ai_prefix}: " + outputs[output_key] - self.buffer += f"\n{human}\n{ai}" - - async def asave_context( - self, - inputs: dict[str, Any], - outputs: dict[str, str], - ) -> None: - """Save context from this conversation to buffer.""" - return self.save_context(inputs, outputs) - - def clear(self) -> None: - """Clear memory contents.""" - self.buffer = "" - - @override - async def aclear(self) -> None: - self.clear() diff --git a/libs/langchain/langchain_classic/memory/buffer_window.py b/libs/langchain/langchain_classic/memory/buffer_window.py deleted file mode 100644 index 264a836caa44b..0000000000000 --- a/libs/langchain/langchain_classic/memory/buffer_window.py +++ /dev/null @@ -1,62 +0,0 @@ -from typing import Any - -from langchain_core._api import deprecated -from langchain_core.messages import BaseMessage, get_buffer_string -from typing_extensions import override - -from langchain_classic.memory.chat_memory import BaseChatMemory - - -@deprecated( - since="0.3.1", - removal="1.0.0", - message=( - "Please see the migration guide at: " - "https://python.langchain.com/docs/versions/migrating_memory/" - ), -) -class ConversationBufferWindowMemory(BaseChatMemory): - """Use to keep track of the last k turns of a conversation. - - If the number of messages in the conversation is more than the maximum number - of messages to keep, the oldest messages are dropped. - """ - - human_prefix: str = "Human" - ai_prefix: str = "AI" - memory_key: str = "history" #: :meta private: - k: int = 5 - """Number of messages to store in buffer.""" - - @property - def buffer(self) -> str | list[BaseMessage]: - """String buffer of memory.""" - return self.buffer_as_messages if self.return_messages else self.buffer_as_str - - @property - def buffer_as_str(self) -> str: - """Exposes the buffer as a string in case return_messages is False.""" - messages = self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else [] - return get_buffer_string( - messages, - human_prefix=self.human_prefix, - ai_prefix=self.ai_prefix, - ) - - @property - def buffer_as_messages(self) -> list[BaseMessage]: - """Exposes the buffer as a list of messages in case return_messages is True.""" - return self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else [] - - @property - def memory_variables(self) -> list[str]: - """Will always return list of memory variables. 
- - :meta private: - """ - return [self.memory_key] - - @override - def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: - """Return history buffer.""" - return {self.memory_key: self.buffer} diff --git a/libs/langchain/langchain_classic/memory/chat_memory.py b/libs/langchain/langchain_classic/memory/chat_memory.py deleted file mode 100644 index 5a86a78024e30..0000000000000 --- a/libs/langchain/langchain_classic/memory/chat_memory.py +++ /dev/null @@ -1,104 +0,0 @@ -import warnings -from abc import ABC -from typing import Any - -from langchain_core._api import deprecated -from langchain_core.chat_history import ( - BaseChatMessageHistory, - InMemoryChatMessageHistory, -) -from langchain_core.memory import BaseMemory -from langchain_core.messages import AIMessage, HumanMessage -from pydantic import Field - -from langchain_classic.memory.utils import get_prompt_input_key - - -@deprecated( - since="0.3.1", - removal="1.0.0", - message=( - "Please see the migration guide at: " - "https://python.langchain.com/docs/versions/migrating_memory/" - ), -) -class BaseChatMemory(BaseMemory, ABC): - """Abstract base class for chat memory. - - **ATTENTION** This abstraction was created prior to when chat models had - native tool calling capabilities. - It does **NOT** support native tool calling capabilities for chat models and - will fail SILENTLY if used with a chat model that has native tool calling. - - DO NOT USE THIS ABSTRACTION FOR NEW CODE. - """ - - chat_memory: BaseChatMessageHistory = Field( - default_factory=InMemoryChatMessageHistory, - ) - output_key: str | None = None - input_key: str | None = None - return_messages: bool = False - - def _get_input_output( - self, - inputs: dict[str, Any], - outputs: dict[str, str], - ) -> tuple[str, str]: - if self.input_key is None: - prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) - else: - prompt_input_key = self.input_key - if self.output_key is None: - if len(outputs) == 1: - output_key = next(iter(outputs.keys())) - elif "output" in outputs: - output_key = "output" - warnings.warn( - f"'{self.__class__.__name__}' got multiple output keys:" - f" {outputs.keys()}. The default 'output' key is being used." - f" If this is not desired, please manually set 'output_key'.", - stacklevel=3, - ) - else: - msg = ( - f"Got multiple output keys: {outputs.keys()}, cannot " - f"determine which to store in memory. Please set the " - f"'output_key' explicitly." 
- ) - raise ValueError(msg) - else: - output_key = self.output_key - return inputs[prompt_input_key], outputs[output_key] - - def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: - """Save context from this conversation to buffer.""" - input_str, output_str = self._get_input_output(inputs, outputs) - self.chat_memory.add_messages( - [ - HumanMessage(content=input_str), - AIMessage(content=output_str), - ], - ) - - async def asave_context( - self, - inputs: dict[str, Any], - outputs: dict[str, str], - ) -> None: - """Save context from this conversation to buffer.""" - input_str, output_str = self._get_input_output(inputs, outputs) - await self.chat_memory.aadd_messages( - [ - HumanMessage(content=input_str), - AIMessage(content=output_str), - ], - ) - - def clear(self) -> None: - """Clear memory contents.""" - self.chat_memory.clear() - - async def aclear(self) -> None: - """Clear memory contents.""" - await self.chat_memory.aclear() diff --git a/libs/langchain/langchain_classic/memory/entity.py b/libs/langchain/langchain_classic/memory/entity.py deleted file mode 100644 index 3b45140792df9..0000000000000 --- a/libs/langchain/langchain_classic/memory/entity.py +++ /dev/null @@ -1,614 +0,0 @@ -"""Deprecated as of LangChain v0.3.4 and will be removed in LangChain v1.0.0.""" - -import logging -from abc import ABC, abstractmethod -from collections.abc import Iterable -from itertools import islice -from typing import TYPE_CHECKING, Any - -from langchain_core._api import deprecated -from langchain_core.language_models import BaseLanguageModel -from langchain_core.messages import BaseMessage, get_buffer_string -from langchain_core.prompts import BasePromptTemplate -from pydantic import BaseModel, ConfigDict, Field -from typing_extensions import override - -from langchain_classic.chains.llm import LLMChain -from langchain_classic.memory.chat_memory import BaseChatMemory -from langchain_classic.memory.prompt import ( - ENTITY_EXTRACTION_PROMPT, - ENTITY_SUMMARIZATION_PROMPT, -) -from langchain_classic.memory.utils import get_prompt_input_key - -if TYPE_CHECKING: - import sqlite3 - -logger = logging.getLogger(__name__) - - -@deprecated( - since="0.3.1", - removal="1.0.0", - message=( - "Please see the migration guide at: " - "https://python.langchain.com/docs/versions/migrating_memory/" - ), -) -class BaseEntityStore(BaseModel, ABC): - """Abstract base class for Entity store.""" - - @abstractmethod - def get(self, key: str, default: str | None = None) -> str | None: - """Get entity value from store.""" - - @abstractmethod - def set(self, key: str, value: str | None) -> None: - """Set entity value in store.""" - - @abstractmethod - def delete(self, key: str) -> None: - """Delete entity value from store.""" - - @abstractmethod - def exists(self, key: str) -> bool: - """Check if entity exists in store.""" - - @abstractmethod - def clear(self) -> None: - """Delete all entities from store.""" - - -@deprecated( - since="0.3.1", - removal="1.0.0", - message=( - "Please see the migration guide at: " - "https://python.langchain.com/docs/versions/migrating_memory/" - ), -) -class InMemoryEntityStore(BaseEntityStore): - """In-memory Entity store.""" - - store: dict[str, str | None] = {} - - @override - def get(self, key: str, default: str | None = None) -> str | None: - return self.store.get(key, default) - - @override - def set(self, key: str, value: str | None) -> None: - self.store[key] = value - - @override - def delete(self, key: str) -> None: - del self.store[key] - - @override 
- def exists(self, key: str) -> bool: - return key in self.store - - @override - def clear(self) -> None: - return self.store.clear() - - -@deprecated( - since="0.3.1", - removal="1.0.0", - message=( - "Please see the migration guide at: " - "https://python.langchain.com/docs/versions/migrating_memory/" - ), -) -class UpstashRedisEntityStore(BaseEntityStore): - """Upstash Redis backed Entity store. - - Entities get a TTL of 1 day by default, and - that TTL is extended by 3 days every time the entity is read back. - """ - - def __init__( - self, - session_id: str = "default", - url: str = "", - token: str = "", - key_prefix: str = "memory_store", - ttl: int | None = 60 * 60 * 24, - recall_ttl: int | None = 60 * 60 * 24 * 3, - *args: Any, - **kwargs: Any, - ): - """Initializes the RedisEntityStore. - - Args: - session_id: Unique identifier for the session. - url: URL of the Redis server. - token: Authentication token for the Redis server. - key_prefix: Prefix for keys in the Redis store. - ttl: Time-to-live for keys in seconds (default 1 day). - recall_ttl: Time-to-live extension for keys when recalled (default 3 days). - *args: Additional positional arguments. - **kwargs: Additional keyword arguments. - """ - try: - from upstash_redis import Redis - except ImportError as e: - msg = ( - "Could not import upstash_redis python package. " - "Please install it with `pip install upstash_redis`." - ) - raise ImportError(msg) from e - - super().__init__(*args, **kwargs) - - try: - self.redis_client = Redis(url=url, token=token) - except Exception as exc: - error_msg = "Upstash Redis instance could not be initiated" - logger.exception(error_msg) - raise RuntimeError(error_msg) from exc - - self.session_id = session_id - self.key_prefix = key_prefix - self.ttl = ttl - self.recall_ttl = recall_ttl or ttl - - @property - def full_key_prefix(self) -> str: - """Returns the full key prefix with session ID.""" - return f"{self.key_prefix}:{self.session_id}" - - @override - def get(self, key: str, default: str | None = None) -> str | None: - res = ( - self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl) - or default - or "" - ) - logger.debug( - "Upstash Redis MEM get '%s:%s': '%s'", self.full_key_prefix, key, res - ) - return res - - @override - def set(self, key: str, value: str | None) -> None: - if not value: - return self.delete(key) - self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl) - logger.debug( - "Redis MEM set '%s:%s': '%s' EX %s", - self.full_key_prefix, - key, - value, - self.ttl, - ) - return None - - @override - def delete(self, key: str) -> None: - self.redis_client.delete(f"{self.full_key_prefix}:{key}") - - @override - def exists(self, key: str) -> bool: - return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1 - - @override - def clear(self) -> None: - def scan_and_delete(cursor: int) -> int: - cursor, keys_to_delete = self.redis_client.scan( - cursor, - f"{self.full_key_prefix}:*", - ) - self.redis_client.delete(*keys_to_delete) - return cursor - - cursor = scan_and_delete(0) - while cursor != 0: - scan_and_delete(cursor) - - -@deprecated( - since="0.3.1", - removal="1.0.0", - message=( - "Please see the migration guide at: " - "https://python.langchain.com/docs/versions/migrating_memory/" - ), -) -class RedisEntityStore(BaseEntityStore): - """Redis-backed Entity store. - - Entities get a TTL of 1 day by default, and - that TTL is extended by 3 days every time the entity is read back. 
- """ - - redis_client: Any - session_id: str = "default" - key_prefix: str = "memory_store" - ttl: int | None = 60 * 60 * 24 - recall_ttl: int | None = 60 * 60 * 24 * 3 - - def __init__( - self, - session_id: str = "default", - url: str = "redis://localhost:6379/0", - key_prefix: str = "memory_store", - ttl: int | None = 60 * 60 * 24, - recall_ttl: int | None = 60 * 60 * 24 * 3, - *args: Any, - **kwargs: Any, - ): - """Initializes the RedisEntityStore. - - Args: - session_id: Unique identifier for the session. - url: URL of the Redis server. - key_prefix: Prefix for keys in the Redis store. - ttl: Time-to-live for keys in seconds (default 1 day). - recall_ttl: Time-to-live extension for keys when recalled (default 3 days). - *args: Additional positional arguments. - **kwargs: Additional keyword arguments. - """ - try: - import redis - except ImportError as e: - msg = ( - "Could not import redis python package. " - "Please install it with `pip install redis`." - ) - raise ImportError(msg) from e - - super().__init__(*args, **kwargs) - - try: - from langchain_community.utilities.redis import get_client - except ImportError as e: - msg = ( - "Could not import langchain_community.utilities.redis.get_client. " - "Please install it with `pip install langchain-community`." - ) - raise ImportError(msg) from e - - try: - self.redis_client = get_client(redis_url=url, decode_responses=True) - except redis.exceptions.ConnectionError: - logger.exception("Redis client could not connect") - - self.session_id = session_id - self.key_prefix = key_prefix - self.ttl = ttl - self.recall_ttl = recall_ttl or ttl - - @property - def full_key_prefix(self) -> str: - """Returns the full key prefix with session ID.""" - return f"{self.key_prefix}:{self.session_id}" - - @override - def get(self, key: str, default: str | None = None) -> str | None: - res = ( - self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl) - or default - or "" - ) - logger.debug("REDIS MEM get '%s:%s': '%s'", self.full_key_prefix, key, res) - return res - - @override - def set(self, key: str, value: str | None) -> None: - if not value: - return self.delete(key) - self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl) - logger.debug( - "REDIS MEM set '%s:%s': '%s' EX %s", - self.full_key_prefix, - key, - value, - self.ttl, - ) - return None - - @override - def delete(self, key: str) -> None: - self.redis_client.delete(f"{self.full_key_prefix}:{key}") - - @override - def exists(self, key: str) -> bool: - return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1 - - @override - def clear(self) -> None: - # iterate a list in batches of size batch_size - def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]: - iterator = iter(iterable) - while batch := list(islice(iterator, batch_size)): - yield batch - - for keybatch in batched( - self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), - 500, - ): - self.redis_client.delete(*keybatch) - - -@deprecated( - since="0.3.1", - removal="1.0.0", - message=( - "Please see the migration guide at: " - "https://python.langchain.com/docs/versions/migrating_memory/" - ), -) -class SQLiteEntityStore(BaseEntityStore): - """SQLite-backed Entity store with safe query construction.""" - - session_id: str = "default" - table_name: str = "memory_store" - conn: Any = None - - model_config = ConfigDict( - arbitrary_types_allowed=True, - ) - - def __init__( - self, - session_id: str = "default", - db_file: str = "entities.db", - table_name: str = 
"memory_store", - *args: Any, - **kwargs: Any, - ): - """Initializes the SQLiteEntityStore. - - Args: - session_id: Unique identifier for the session. - db_file: Path to the SQLite database file. - table_name: Name of the table to store entities. - *args: Additional positional arguments. - **kwargs: Additional keyword arguments. - """ - super().__init__(*args, **kwargs) - try: - import sqlite3 - except ImportError as e: - msg = ( - "Could not import sqlite3 python package. " - "Please install it with `pip install sqlite3`." - ) - raise ImportError(msg) from e - - # Basic validation to prevent obviously malicious table/session names - if not table_name.isidentifier() or not session_id.isidentifier(): - # Since we validate here, we can safely suppress the S608 bandit warning - msg = "Table name and session ID must be valid Python identifiers." - raise ValueError(msg) - - self.conn = sqlite3.connect(db_file) - self.session_id = session_id - self.table_name = table_name - self._create_table_if_not_exists() - - @property - def full_table_name(self) -> str: - """Returns the full table name with session ID.""" - return f"{self.table_name}_{self.session_id}" - - def _execute_query(self, query: str, params: tuple = ()) -> "sqlite3.Cursor": - """Executes a query with proper connection handling.""" - with self.conn: - return self.conn.execute(query, params) - - def _create_table_if_not_exists(self) -> None: - """Creates the entity table if it doesn't exist, using safe quoting.""" - # Use standard SQL double quotes for the table name identifier - create_table_query = f""" - CREATE TABLE IF NOT EXISTS "{self.full_table_name}" ( - key TEXT PRIMARY KEY, - value TEXT - ) - """ - self._execute_query(create_table_query) - - def get(self, key: str, default: str | None = None) -> str | None: - """Retrieves a value, safely quoting the table name.""" - # `?` placeholder is used for the value to prevent SQL injection - # Ignore S608 since we validate for malicious table/session names in `__init__` - query = f'SELECT value FROM "{self.full_table_name}" WHERE key = ?' # noqa: S608 - cursor = self._execute_query(query, (key,)) - result = cursor.fetchone() - return result[0] if result is not None else default - - def set(self, key: str, value: str | None) -> None: - """Inserts or replaces a value, safely quoting the table name.""" - if not value: - return self.delete(key) - # Ignore S608 since we validate for malicious table/session names in `__init__` - query = ( - "INSERT OR REPLACE INTO " # noqa: S608 - f'"{self.full_table_name}" (key, value) VALUES (?, ?)' - ) - self._execute_query(query, (key, value)) - return None - - def delete(self, key: str) -> None: - """Deletes a key-value pair, safely quoting the table name.""" - # Ignore S608 since we validate for malicious table/session names in `__init__` - query = f'DELETE FROM "{self.full_table_name}" WHERE key = ?' # noqa: S608 - self._execute_query(query, (key,)) - - def exists(self, key: str) -> bool: - """Checks for the existence of a key, safely quoting the table name.""" - # Ignore S608 since we validate for malicious table/session names in `__init__` - query = f'SELECT 1 FROM "{self.full_table_name}" WHERE key = ? 
LIMIT 1' # noqa: S608 - cursor = self._execute_query(query, (key,)) - return cursor.fetchone() is not None - - @override - def clear(self) -> None: - # Ignore S608 since we validate for malicious table/session names in `__init__` - query = f""" - DELETE FROM {self.full_table_name} - """ # noqa: S608 - with self.conn: - self.conn.execute(query) - - -@deprecated( - since="0.3.1", - removal="1.0.0", - message=( - "Please see the migration guide at: " - "https://python.langchain.com/docs/versions/migrating_memory/" - ), -) -class ConversationEntityMemory(BaseChatMemory): - """Entity extractor & summarizer memory. - - Extracts named entities from the recent chat history and generates summaries. - With a swappable entity store, persisting entities across conversations. - Defaults to an in-memory entity store, and can be swapped out for a Redis, - SQLite, or other entity store. - """ - - human_prefix: str = "Human" - ai_prefix: str = "AI" - llm: BaseLanguageModel - entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT - entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT - - # Cache of recently detected entity names, if any - # It is updated when load_memory_variables is called: - entity_cache: list[str] = [] - - # Number of recent message pairs to consider when updating entities: - k: int = 3 - - chat_history_key: str = "history" - - # Store to manage entity-related data: - entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore) - - @property - def buffer(self) -> list[BaseMessage]: - """Access chat memory messages.""" - return self.chat_memory.messages - - @property - def memory_variables(self) -> list[str]: - """Will always return list of memory variables. - - :meta private: - """ - return ["entities", self.chat_history_key] - - def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: - """Load memory variables. - - Returns chat history and all generated entities with summaries if available, - and updates or clears the recent entity cache. - - New entity name can be found when calling this method, before the entity - summaries are generated, so the entity cache values may be empty if no entity - descriptions are generated yet. - """ - # Create an LLMChain for predicting entity names from the recent chat history: - chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) - - if self.input_key is None: - prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) - else: - prompt_input_key = self.input_key - - # Extract an arbitrary window of the last message pairs from - # the chat history, where the hyperparameter k is the - # number of message pairs: - buffer_string = get_buffer_string( - self.buffer[-self.k * 2 :], - human_prefix=self.human_prefix, - ai_prefix=self.ai_prefix, - ) - - # Generates a comma-separated list of named entities, - # e.g. "Jane, White House, UFO" - # or "NONE" if no named entities are extracted: - output = chain.predict( - history=buffer_string, - input=inputs[prompt_input_key], - ) - - # If no named entities are extracted, assigns an empty list. 
- if output.strip() == "NONE": - entities = [] - else: - # Make a list of the extracted entities: - entities = [w.strip() for w in output.split(",")] - - # Make a dictionary of entities with summary if exists: - entity_summaries = {} - - for entity in entities: - entity_summaries[entity] = self.entity_store.get(entity, "") - - # Replaces the entity name cache with the most recently discussed entities, - # or if no entities were extracted, clears the cache: - self.entity_cache = entities - - # Should we return as message objects or as a string? - if self.return_messages: - # Get last `k` pair of chat messages: - buffer: Any = self.buffer[-self.k * 2 :] - else: - # Reuse the string we made earlier: - buffer = buffer_string - - return { - self.chat_history_key: buffer, - "entities": entity_summaries, - } - - def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: - """Save context from this conversation history to the entity store. - - Generates a summary for each entity in the entity cache by prompting - the model, and saves these summaries to the entity store. - """ - super().save_context(inputs, outputs) - - if self.input_key is None: - prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) - else: - prompt_input_key = self.input_key - - # Extract an arbitrary window of the last message pairs from - # the chat history, where the hyperparameter k is the - # number of message pairs: - buffer_string = get_buffer_string( - self.buffer[-self.k * 2 :], - human_prefix=self.human_prefix, - ai_prefix=self.ai_prefix, - ) - - input_data = inputs[prompt_input_key] - - # Create an LLMChain for predicting entity summarization from the context - chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt) - - # Generate new summaries for entities and save them in the entity store - for entity in self.entity_cache: - # Get existing summary if it exists - existing_summary = self.entity_store.get(entity, "") - output = chain.predict( - summary=existing_summary, - entity=entity, - history=buffer_string, - input=input_data, - ) - # Save the updated summary to the entity store - self.entity_store.set(entity, output.strip()) - - def clear(self) -> None: - """Clear memory contents.""" - self.chat_memory.clear() - self.entity_cache.clear() - self.entity_store.clear() diff --git a/libs/langchain/langchain_classic/memory/summary.py b/libs/langchain/langchain_classic/memory/summary.py deleted file mode 100644 index 5b2ed54e56a70..0000000000000 --- a/libs/langchain/langchain_classic/memory/summary.py +++ /dev/null @@ -1,171 +0,0 @@ -from __future__ import annotations - -from typing import Any - -from langchain_core._api import deprecated -from langchain_core.chat_history import BaseChatMessageHistory -from langchain_core.language_models import BaseLanguageModel -from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string -from langchain_core.prompts import BasePromptTemplate -from langchain_core.utils import pre_init -from pydantic import BaseModel -from typing_extensions import override - -from langchain_classic.chains.llm import LLMChain -from langchain_classic.memory.chat_memory import BaseChatMemory -from langchain_classic.memory.prompt import SUMMARY_PROMPT - - -@deprecated( - since="0.2.12", - removal="1.0", - message=( - "Refer here for how to incorporate summaries of conversation history: " - "https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/" - ), -) -class SummarizerMixin(BaseModel): - """Mixin 
for summarizer.""" - - human_prefix: str = "Human" - ai_prefix: str = "AI" - llm: BaseLanguageModel - prompt: BasePromptTemplate = SUMMARY_PROMPT - summary_message_cls: type[BaseMessage] = SystemMessage - - def predict_new_summary( - self, - messages: list[BaseMessage], - existing_summary: str, - ) -> str: - """Predict a new summary based on the messages and existing summary. - - Args: - messages: List of messages to summarize. - existing_summary: Existing summary to build upon. - - Returns: - A new summary string. - """ - new_lines = get_buffer_string( - messages, - human_prefix=self.human_prefix, - ai_prefix=self.ai_prefix, - ) - - chain = LLMChain(llm=self.llm, prompt=self.prompt) - return chain.predict(summary=existing_summary, new_lines=new_lines) - - async def apredict_new_summary( - self, - messages: list[BaseMessage], - existing_summary: str, - ) -> str: - """Predict a new summary based on the messages and existing summary. - - Args: - messages: List of messages to summarize. - existing_summary: Existing summary to build upon. - - Returns: - A new summary string. - """ - new_lines = get_buffer_string( - messages, - human_prefix=self.human_prefix, - ai_prefix=self.ai_prefix, - ) - - chain = LLMChain(llm=self.llm, prompt=self.prompt) - return await chain.apredict(summary=existing_summary, new_lines=new_lines) - - -@deprecated( - since="0.3.1", - removal="1.0.0", - message=( - "Please see the migration guide at: " - "https://python.langchain.com/docs/versions/migrating_memory/" - ), -) -class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin): - """Continually summarizes the conversation history. - - The summary is updated after each conversation turn. - The implementations returns a summary of the conversation history which - can be used to provide context to the model. - """ - - buffer: str = "" - memory_key: str = "history" #: :meta private: - - @classmethod - def from_messages( - cls, - llm: BaseLanguageModel, - chat_memory: BaseChatMessageHistory, - *, - summarize_step: int = 2, - **kwargs: Any, - ) -> ConversationSummaryMemory: - """Create a ConversationSummaryMemory from a list of messages. - - Args: - llm: The language model to use for summarization. - chat_memory: The chat history to summarize. - summarize_step: Number of messages to summarize at a time. - **kwargs: Additional keyword arguments to pass to the class. - - Returns: - An instance of ConversationSummaryMemory with the summarized history. - """ - obj = cls(llm=llm, chat_memory=chat_memory, **kwargs) - for i in range(0, len(obj.chat_memory.messages), summarize_step): - obj.buffer = obj.predict_new_summary( - obj.chat_memory.messages[i : i + summarize_step], - obj.buffer, - ) - return obj - - @property - def memory_variables(self) -> list[str]: - """Will always return list of memory variables. - - :meta private: - """ - return [self.memory_key] - - @override - def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: - """Return history buffer.""" - if self.return_messages: - buffer: Any = [self.summary_message_cls(content=self.buffer)] - else: - buffer = self.buffer - return {self.memory_key: buffer} - - @pre_init - def validate_prompt_input_variables(cls, values: dict) -> dict: - """Validate that prompt input variables are consistent.""" - prompt_variables = values["prompt"].input_variables - expected_keys = {"summary", "new_lines"} - if expected_keys != set(prompt_variables): - msg = ( - "Got unexpected prompt input variables. 
The prompt expects " - f"{prompt_variables}, but it should have {expected_keys}." - ) - raise ValueError(msg) - return values - - def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: - """Save context from this conversation to buffer.""" - super().save_context(inputs, outputs) - self.buffer = self.predict_new_summary( - self.chat_memory.messages[-2:], - self.buffer, - ) - - def clear(self) -> None: - """Clear memory contents.""" - super().clear() - self.buffer = "" diff --git a/libs/langchain/langchain_classic/memory/summary_buffer.py b/libs/langchain/langchain_classic/memory/summary_buffer.py deleted file mode 100644 index fffcceb27bb4f..0000000000000 --- a/libs/langchain/langchain_classic/memory/summary_buffer.py +++ /dev/null @@ -1,151 +0,0 @@ -from typing import Any - -from langchain_core._api import deprecated -from langchain_core.messages import BaseMessage, get_buffer_string -from langchain_core.utils import pre_init -from typing_extensions import override - -from langchain_classic.memory.chat_memory import BaseChatMemory -from langchain_classic.memory.summary import SummarizerMixin - - -@deprecated( - since="0.3.1", - removal="1.0.0", - message=( - "Please see the migration guide at: " - "https://python.langchain.com/docs/versions/migrating_memory/" - ), -) -class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin): - """Buffer with summarizer for storing conversation memory. - - Provides a running summary of the conversation together with the most recent - messages in the conversation under the constraint that the total number of - tokens in the conversation does not exceed a certain limit. - """ - - max_token_limit: int = 2000 - moving_summary_buffer: str = "" - memory_key: str = "history" - - @property - def buffer(self) -> str | list[BaseMessage]: - """String buffer of memory.""" - return self.load_memory_variables({})[self.memory_key] - - async def abuffer(self) -> str | list[BaseMessage]: - """Async memory buffer.""" - memory_variables = await self.aload_memory_variables({}) - return memory_variables[self.memory_key] - - @property - def memory_variables(self) -> list[str]: - """Will always return list of memory variables. 
- - :meta private: - """ - return [self.memory_key] - - @override - def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: - """Return history buffer.""" - buffer = self.chat_memory.messages - if self.moving_summary_buffer != "": - first_messages: list[BaseMessage] = [ - self.summary_message_cls(content=self.moving_summary_buffer), - ] - buffer = first_messages + buffer - if self.return_messages: - final_buffer: Any = buffer - else: - final_buffer = get_buffer_string( - buffer, - human_prefix=self.human_prefix, - ai_prefix=self.ai_prefix, - ) - return {self.memory_key: final_buffer} - - @override - async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: - """Asynchronously return key-value pairs given the text input to the chain.""" - buffer = await self.chat_memory.aget_messages() - if self.moving_summary_buffer != "": - first_messages: list[BaseMessage] = [ - self.summary_message_cls(content=self.moving_summary_buffer), - ] - buffer = first_messages + buffer - if self.return_messages: - final_buffer: Any = buffer - else: - final_buffer = get_buffer_string( - buffer, - human_prefix=self.human_prefix, - ai_prefix=self.ai_prefix, - ) - return {self.memory_key: final_buffer} - - @pre_init - def validate_prompt_input_variables(cls, values: dict) -> dict: - """Validate that prompt input variables are consistent.""" - prompt_variables = values["prompt"].input_variables - expected_keys = {"summary", "new_lines"} - if expected_keys != set(prompt_variables): - msg = ( - "Got unexpected prompt input variables. The prompt expects " - f"{prompt_variables}, but it should have {expected_keys}." - ) - raise ValueError(msg) - return values - - def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: - """Save context from this conversation to buffer.""" - super().save_context(inputs, outputs) - self.prune() - - async def asave_context( - self, - inputs: dict[str, Any], - outputs: dict[str, str], - ) -> None: - """Asynchronously save context from this conversation to buffer.""" - await super().asave_context(inputs, outputs) - await self.aprune() - - def prune(self) -> None: - """Prune buffer if it exceeds max token limit.""" - buffer = self.chat_memory.messages - curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) - if curr_buffer_length > self.max_token_limit: - pruned_memory = [] - while curr_buffer_length > self.max_token_limit: - pruned_memory.append(buffer.pop(0)) - curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) - self.moving_summary_buffer = self.predict_new_summary( - pruned_memory, - self.moving_summary_buffer, - ) - - async def aprune(self) -> None: - """Asynchronously prune buffer if it exceeds max token limit.""" - buffer = self.chat_memory.messages - curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) - if curr_buffer_length > self.max_token_limit: - pruned_memory = [] - while curr_buffer_length > self.max_token_limit: - pruned_memory.append(buffer.pop(0)) - curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) - self.moving_summary_buffer = await self.apredict_new_summary( - pruned_memory, - self.moving_summary_buffer, - ) - - def clear(self) -> None: - """Clear memory contents.""" - super().clear() - self.moving_summary_buffer = "" - - async def aclear(self) -> None: - """Asynchronously clear memory contents.""" - await super().aclear() - self.moving_summary_buffer = "" diff --git a/libs/langchain/langchain_classic/memory/token_buffer.py 
b/libs/langchain/langchain_classic/memory/token_buffer.py deleted file mode 100644 index caa0f78bcefad..0000000000000 --- a/libs/langchain/langchain_classic/memory/token_buffer.py +++ /dev/null @@ -1,74 +0,0 @@ -from typing import Any - -from langchain_core._api import deprecated -from langchain_core.language_models import BaseLanguageModel -from langchain_core.messages import BaseMessage, get_buffer_string -from typing_extensions import override - -from langchain_classic.memory.chat_memory import BaseChatMemory - - -@deprecated( - since="0.3.1", - removal="1.0.0", - message=( - "Please see the migration guide at: " - "https://python.langchain.com/docs/versions/migrating_memory/" - ), -) -class ConversationTokenBufferMemory(BaseChatMemory): - """Conversation chat memory with token limit. - - Keeps only the most recent messages in the conversation under the constraint - that the total number of tokens in the conversation does not exceed a certain limit. - """ - - human_prefix: str = "Human" - ai_prefix: str = "AI" - llm: BaseLanguageModel - memory_key: str = "history" - max_token_limit: int = 2000 - - @property - def buffer(self) -> Any: - """String buffer of memory.""" - return self.buffer_as_messages if self.return_messages else self.buffer_as_str - - @property - def buffer_as_str(self) -> str: - """Exposes the buffer as a string in case return_messages is False.""" - return get_buffer_string( - self.chat_memory.messages, - human_prefix=self.human_prefix, - ai_prefix=self.ai_prefix, - ) - - @property - def buffer_as_messages(self) -> list[BaseMessage]: - """Exposes the buffer as a list of messages in case return_messages is True.""" - return self.chat_memory.messages - - @property - def memory_variables(self) -> list[str]: - """Will always return list of memory variables. - - :meta private: - """ - return [self.memory_key] - - @override - def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: - """Return history buffer.""" - return {self.memory_key: self.buffer} - - def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: - """Save context from this conversation to buffer. Pruned.""" - super().save_context(inputs, outputs) - # Prune buffer if it exceeds max token limit - buffer = self.chat_memory.messages - curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) - if curr_buffer_length > self.max_token_limit: - pruned_memory = [] - while curr_buffer_length > self.max_token_limit: - pruned_memory.append(buffer.pop(0)) - curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) diff --git a/libs/langchain/langchain_classic/memory/vectorstore.py b/libs/langchain/langchain_classic/memory/vectorstore.py deleted file mode 100644 index ce09cb3a3aa01..0000000000000 --- a/libs/langchain/langchain_classic/memory/vectorstore.py +++ /dev/null @@ -1,122 +0,0 @@ -"""Class for a VectorStore-backed memory object.""" - -from collections.abc import Sequence -from typing import Any - -from langchain_core._api import deprecated -from langchain_core.documents import Document -from langchain_core.memory import BaseMemory -from langchain_core.vectorstores import VectorStoreRetriever -from pydantic import Field - -from langchain_classic.memory.utils import get_prompt_input_key - - -@deprecated( - since="0.3.1", - removal="1.0.0", - message=( - "Please see the migration guide at: " - "https://python.langchain.com/docs/versions/migrating_memory/" - ), -) -class VectorStoreRetrieverMemory(BaseMemory): - """Vector Store Retriever Memory. 
- - Store the conversation history in a vector store and retrieves the relevant - parts of past conversation based on the input. - """ - - retriever: VectorStoreRetriever = Field(exclude=True) - """VectorStoreRetriever object to connect to.""" - - memory_key: str = "history" #: :meta private: - """Key name to locate the memories in the result of load_memory_variables.""" - - input_key: str | None = None - """Key name to index the inputs to load_memory_variables.""" - - return_docs: bool = False - """Whether or not to return the result of querying the database directly.""" - - exclude_input_keys: Sequence[str] = Field(default_factory=tuple) - """Input keys to exclude in addition to memory key when constructing the document""" - - @property - def memory_variables(self) -> list[str]: - """The list of keys emitted from the load_memory_variables method.""" - return [self.memory_key] - - def _get_prompt_input_key(self, inputs: dict[str, Any]) -> str: - """Get the input key for the prompt.""" - if self.input_key is None: - return get_prompt_input_key(inputs, self.memory_variables) - return self.input_key - - def _documents_to_memory_variables( - self, - docs: list[Document], - ) -> dict[str, list[Document] | str]: - result: list[Document] | str - if not self.return_docs: - result = "\n".join([doc.page_content for doc in docs]) - else: - result = docs - return {self.memory_key: result} - - def load_memory_variables( - self, - inputs: dict[str, Any], - ) -> dict[str, list[Document] | str]: - """Return history buffer.""" - input_key = self._get_prompt_input_key(inputs) - query = inputs[input_key] - docs = self.retriever.invoke(query) - return self._documents_to_memory_variables(docs) - - async def aload_memory_variables( - self, - inputs: dict[str, Any], - ) -> dict[str, list[Document] | str]: - """Return history buffer.""" - input_key = self._get_prompt_input_key(inputs) - query = inputs[input_key] - docs = await self.retriever.ainvoke(query) - return self._documents_to_memory_variables(docs) - - def _form_documents( - self, - inputs: dict[str, Any], - outputs: dict[str, str], - ) -> list[Document]: - """Format context from this conversation to buffer.""" - # Each document should only include the current turn, not the chat history - exclude = set(self.exclude_input_keys) - exclude.add(self.memory_key) - filtered_inputs = {k: v for k, v in inputs.items() if k not in exclude} - texts = [ - f"{k}: {v}" - for k, v in list(filtered_inputs.items()) + list(outputs.items()) - ] - page_content = "\n".join(texts) - return [Document(page_content=page_content)] - - def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: - """Save context from this conversation to buffer.""" - documents = self._form_documents(inputs, outputs) - self.retriever.add_documents(documents) - - async def asave_context( - self, - inputs: dict[str, Any], - outputs: dict[str, str], - ) -> None: - """Save context from this conversation to buffer.""" - documents = self._form_documents(inputs, outputs) - await self.retriever.aadd_documents(documents) - - def clear(self) -> None: - """Nothing to clear.""" - - async def aclear(self) -> None: - """Nothing to clear.""" From c36e75ec0104901724fda04f90f67a5552c6e3af Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Wed, 15 Oct 2025 12:27:51 -0400 Subject: [PATCH 3/8] add back memory modules --- .../langchain_classic/memory/buffer.py | 179 +++++ .../langchain_classic/memory/buffer_window.py | 62 ++ .../langchain_classic/memory/chat_memory.py | 104 +++ 
.../langchain_classic/memory/entity.py | 614 ++++++++++++++++++ .../langchain_classic/memory/summary.py | 171 +++++ .../memory/summary_buffer.py | 151 +++++ .../langchain_classic/memory/token_buffer.py | 74 +++ .../langchain_classic/memory/vectorstore.py | 122 ++++ 8 files changed, 1477 insertions(+) create mode 100644 libs/langchain/langchain_classic/memory/buffer.py create mode 100644 libs/langchain/langchain_classic/memory/buffer_window.py create mode 100644 libs/langchain/langchain_classic/memory/chat_memory.py create mode 100644 libs/langchain/langchain_classic/memory/entity.py create mode 100644 libs/langchain/langchain_classic/memory/summary.py create mode 100644 libs/langchain/langchain_classic/memory/summary_buffer.py create mode 100644 libs/langchain/langchain_classic/memory/token_buffer.py create mode 100644 libs/langchain/langchain_classic/memory/vectorstore.py diff --git a/libs/langchain/langchain_classic/memory/buffer.py b/libs/langchain/langchain_classic/memory/buffer.py new file mode 100644 index 0000000000000..c356b70da08c3 --- /dev/null +++ b/libs/langchain/langchain_classic/memory/buffer.py @@ -0,0 +1,179 @@ +from typing import Any + +from langchain_core._api import deprecated +from langchain_core.memory import BaseMemory +from langchain_core.messages import BaseMessage, get_buffer_string +from langchain_core.utils import pre_init +from typing_extensions import override + +from langchain_classic.memory.chat_memory import BaseChatMemory +from langchain_classic.memory.utils import get_prompt_input_key + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class ConversationBufferMemory(BaseChatMemory): + """A basic memory implementation that simply stores the conversation history. + + This stores the entire conversation history in memory without any + additional processing. + + Note that additional processing may be required in some situations when the + conversation history is too large to fit in the context window of the model. 
+ """ + + human_prefix: str = "Human" + ai_prefix: str = "AI" + memory_key: str = "history" #: :meta private: + + @property + def buffer(self) -> Any: + """String buffer of memory.""" + return self.buffer_as_messages if self.return_messages else self.buffer_as_str + + async def abuffer(self) -> Any: + """String buffer of memory.""" + return ( + await self.abuffer_as_messages() + if self.return_messages + else await self.abuffer_as_str() + ) + + def _buffer_as_str(self, messages: list[BaseMessage]) -> str: + return get_buffer_string( + messages, + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + + @property + def buffer_as_str(self) -> str: + """Exposes the buffer as a string in case return_messages is True.""" + return self._buffer_as_str(self.chat_memory.messages) + + async def abuffer_as_str(self) -> str: + """Exposes the buffer as a string in case return_messages is True.""" + messages = await self.chat_memory.aget_messages() + return self._buffer_as_str(messages) + + @property + def buffer_as_messages(self) -> list[BaseMessage]: + """Exposes the buffer as a list of messages in case return_messages is False.""" + return self.chat_memory.messages + + async def abuffer_as_messages(self) -> list[BaseMessage]: + """Exposes the buffer as a list of messages in case return_messages is False.""" + return await self.chat_memory.aget_messages() + + @property + def memory_variables(self) -> list[str]: + """Will always return list of memory variables. + + :meta private: + """ + return [self.memory_key] + + @override + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Return history buffer.""" + return {self.memory_key: self.buffer} + + @override + async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Return key-value pairs given the text input to the chain.""" + buffer = await self.abuffer() + return {self.memory_key: buffer} + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class ConversationStringBufferMemory(BaseMemory): + """A basic memory implementation that simply stores the conversation history. + + This stores the entire conversation history in memory without any + additional processing. + + Equivalent to ConversationBufferMemory but tailored more specifically + for string-based conversations rather than chat models. + + Note that additional processing may be required in some situations when the + conversation history is too large to fit in the context window of the model. + """ + + human_prefix: str = "Human" + ai_prefix: str = "AI" + """Prefix to use for AI generated responses.""" + buffer: str = "" + output_key: str | None = None + input_key: str | None = None + memory_key: str = "history" #: :meta private: + + @pre_init + def validate_chains(cls, values: dict) -> dict: + """Validate that return messages is not True.""" + if values.get("return_messages", False): + msg = "return_messages must be False for ConversationStringBufferMemory" + raise ValueError(msg) + return values + + @property + def memory_variables(self) -> list[str]: + """Will always return list of memory variables. 
+ + :meta private: + """ + return [self.memory_key] + + @override + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]: + """Return history buffer.""" + return {self.memory_key: self.buffer} + + async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]: + """Return history buffer.""" + return self.load_memory_variables(inputs) + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Save context from this conversation to buffer.""" + if self.input_key is None: + prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) + else: + prompt_input_key = self.input_key + if self.output_key is None: + if len(outputs) != 1: + msg = f"One output key expected, got {outputs.keys()}" + raise ValueError(msg) + output_key = next(iter(outputs.keys())) + else: + output_key = self.output_key + human = f"{self.human_prefix}: " + inputs[prompt_input_key] + ai = f"{self.ai_prefix}: " + outputs[output_key] + self.buffer += f"\n{human}\n{ai}" + + async def asave_context( + self, + inputs: dict[str, Any], + outputs: dict[str, str], + ) -> None: + """Save context from this conversation to buffer.""" + return self.save_context(inputs, outputs) + + def clear(self) -> None: + """Clear memory contents.""" + self.buffer = "" + + @override + async def aclear(self) -> None: + self.clear() diff --git a/libs/langchain/langchain_classic/memory/buffer_window.py b/libs/langchain/langchain_classic/memory/buffer_window.py new file mode 100644 index 0000000000000..264a836caa44b --- /dev/null +++ b/libs/langchain/langchain_classic/memory/buffer_window.py @@ -0,0 +1,62 @@ +from typing import Any + +from langchain_core._api import deprecated +from langchain_core.messages import BaseMessage, get_buffer_string +from typing_extensions import override + +from langchain_classic.memory.chat_memory import BaseChatMemory + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class ConversationBufferWindowMemory(BaseChatMemory): + """Use to keep track of the last k turns of a conversation. + + If the number of messages in the conversation is more than the maximum number + of messages to keep, the oldest messages are dropped. + """ + + human_prefix: str = "Human" + ai_prefix: str = "AI" + memory_key: str = "history" #: :meta private: + k: int = 5 + """Number of conversation turns (human/AI message pairs) to keep in buffer.""" + + @property + def buffer(self) -> str | list[BaseMessage]: + """String buffer of memory.""" + return self.buffer_as_messages if self.return_messages else self.buffer_as_str + + @property + def buffer_as_str(self) -> str: + """Exposes the buffer as a string in case return_messages is False.""" + messages = self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else [] + return get_buffer_string( + messages, + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + + @property + def buffer_as_messages(self) -> list[BaseMessage]: + """Exposes the buffer as a list of messages in case return_messages is True.""" + return self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else [] + + @property + def memory_variables(self) -> list[str]: + """Will always return list of memory variables.
+ + :meta private: + """ + return [self.memory_key] + + @override + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Return history buffer.""" + return {self.memory_key: self.buffer} diff --git a/libs/langchain/langchain_classic/memory/chat_memory.py b/libs/langchain/langchain_classic/memory/chat_memory.py new file mode 100644 index 0000000000000..5a86a78024e30 --- /dev/null +++ b/libs/langchain/langchain_classic/memory/chat_memory.py @@ -0,0 +1,104 @@ +import warnings +from abc import ABC +from typing import Any + +from langchain_core._api import deprecated +from langchain_core.chat_history import ( + BaseChatMessageHistory, + InMemoryChatMessageHistory, +) +from langchain_core.memory import BaseMemory +from langchain_core.messages import AIMessage, HumanMessage +from pydantic import Field + +from langchain_classic.memory.utils import get_prompt_input_key + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class BaseChatMemory(BaseMemory, ABC): + """Abstract base class for chat memory. + + **ATTENTION** This abstraction was created prior to when chat models had + native tool calling capabilities. + It does **NOT** support native tool calling capabilities for chat models and + will fail SILENTLY if used with a chat model that has native tool calling. + + DO NOT USE THIS ABSTRACTION FOR NEW CODE. + """ + + chat_memory: BaseChatMessageHistory = Field( + default_factory=InMemoryChatMessageHistory, + ) + output_key: str | None = None + input_key: str | None = None + return_messages: bool = False + + def _get_input_output( + self, + inputs: dict[str, Any], + outputs: dict[str, str], + ) -> tuple[str, str]: + if self.input_key is None: + prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) + else: + prompt_input_key = self.input_key + if self.output_key is None: + if len(outputs) == 1: + output_key = next(iter(outputs.keys())) + elif "output" in outputs: + output_key = "output" + warnings.warn( + f"'{self.__class__.__name__}' got multiple output keys:" + f" {outputs.keys()}. The default 'output' key is being used." + f" If this is not desired, please manually set 'output_key'.", + stacklevel=3, + ) + else: + msg = ( + f"Got multiple output keys: {outputs.keys()}, cannot " + f"determine which to store in memory. Please set the " + f"'output_key' explicitly." 
+ ) + raise ValueError(msg) + else: + output_key = self.output_key + return inputs[prompt_input_key], outputs[output_key] + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Save context from this conversation to buffer.""" + input_str, output_str = self._get_input_output(inputs, outputs) + self.chat_memory.add_messages( + [ + HumanMessage(content=input_str), + AIMessage(content=output_str), + ], + ) + + async def asave_context( + self, + inputs: dict[str, Any], + outputs: dict[str, str], + ) -> None: + """Save context from this conversation to buffer.""" + input_str, output_str = self._get_input_output(inputs, outputs) + await self.chat_memory.aadd_messages( + [ + HumanMessage(content=input_str), + AIMessage(content=output_str), + ], + ) + + def clear(self) -> None: + """Clear memory contents.""" + self.chat_memory.clear() + + async def aclear(self) -> None: + """Clear memory contents.""" + await self.chat_memory.aclear() diff --git a/libs/langchain/langchain_classic/memory/entity.py b/libs/langchain/langchain_classic/memory/entity.py new file mode 100644 index 0000000000000..3b45140792df9 --- /dev/null +++ b/libs/langchain/langchain_classic/memory/entity.py @@ -0,0 +1,614 @@ +"""Deprecated as of LangChain v0.3.4 and will be removed in LangChain v1.0.0.""" + +import logging +from abc import ABC, abstractmethod +from collections.abc import Iterable +from itertools import islice +from typing import TYPE_CHECKING, Any + +from langchain_core._api import deprecated +from langchain_core.language_models import BaseLanguageModel +from langchain_core.messages import BaseMessage, get_buffer_string +from langchain_core.prompts import BasePromptTemplate +from pydantic import BaseModel, ConfigDict, Field +from typing_extensions import override + +from langchain_classic.chains.llm import LLMChain +from langchain_classic.memory.chat_memory import BaseChatMemory +from langchain_classic.memory.prompt import ( + ENTITY_EXTRACTION_PROMPT, + ENTITY_SUMMARIZATION_PROMPT, +) +from langchain_classic.memory.utils import get_prompt_input_key + +if TYPE_CHECKING: + import sqlite3 + +logger = logging.getLogger(__name__) + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class BaseEntityStore(BaseModel, ABC): + """Abstract base class for Entity store.""" + + @abstractmethod + def get(self, key: str, default: str | None = None) -> str | None: + """Get entity value from store.""" + + @abstractmethod + def set(self, key: str, value: str | None) -> None: + """Set entity value in store.""" + + @abstractmethod + def delete(self, key: str) -> None: + """Delete entity value from store.""" + + @abstractmethod + def exists(self, key: str) -> bool: + """Check if entity exists in store.""" + + @abstractmethod + def clear(self) -> None: + """Delete all entities from store.""" + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class InMemoryEntityStore(BaseEntityStore): + """In-memory Entity store.""" + + store: dict[str, str | None] = {} + + @override + def get(self, key: str, default: str | None = None) -> str | None: + return self.store.get(key, default) + + @override + def set(self, key: str, value: str | None) -> None: + self.store[key] = value + + @override + def delete(self, key: str) -> None: + del self.store[key] + + @override + 
def exists(self, key: str) -> bool: + return key in self.store + + @override + def clear(self) -> None: + return self.store.clear() + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class UpstashRedisEntityStore(BaseEntityStore): + """Upstash Redis backed Entity store. + + Entities get a TTL of 1 day by default, and + that TTL is extended by 3 days every time the entity is read back. + """ + + def __init__( + self, + session_id: str = "default", + url: str = "", + token: str = "", + key_prefix: str = "memory_store", + ttl: int | None = 60 * 60 * 24, + recall_ttl: int | None = 60 * 60 * 24 * 3, + *args: Any, + **kwargs: Any, + ): + """Initializes the UpstashRedisEntityStore. + + Args: + session_id: Unique identifier for the session. + url: URL of the Redis server. + token: Authentication token for the Redis server. + key_prefix: Prefix for keys in the Redis store. + ttl: Time-to-live for keys in seconds (default 1 day). + recall_ttl: Time-to-live extension for keys when recalled (default 3 days). + *args: Additional positional arguments. + **kwargs: Additional keyword arguments. + """ + try: + from upstash_redis import Redis + except ImportError as e: + msg = ( + "Could not import upstash_redis python package. " + "Please install it with `pip install upstash_redis`." + ) + raise ImportError(msg) from e + + super().__init__(*args, **kwargs) + + try: + self.redis_client = Redis(url=url, token=token) + except Exception as exc: + error_msg = "Upstash Redis instance could not be initiated" + logger.exception(error_msg) + raise RuntimeError(error_msg) from exc + + self.session_id = session_id + self.key_prefix = key_prefix + self.ttl = ttl + self.recall_ttl = recall_ttl or ttl + + @property + def full_key_prefix(self) -> str: + """Returns the full key prefix with session ID.""" + return f"{self.key_prefix}:{self.session_id}" + + @override + def get(self, key: str, default: str | None = None) -> str | None: + res = ( + self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl) + or default + or "" + ) + logger.debug( + "Upstash Redis MEM get '%s:%s': '%s'", self.full_key_prefix, key, res + ) + return res + + @override + def set(self, key: str, value: str | None) -> None: + if not value: + return self.delete(key) + self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl) + logger.debug( + "Redis MEM set '%s:%s': '%s' EX %s", + self.full_key_prefix, + key, + value, + self.ttl, + ) + return None + + @override + def delete(self, key: str) -> None: + self.redis_client.delete(f"{self.full_key_prefix}:{key}") + + @override + def exists(self, key: str) -> bool: + return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1 + + @override + def clear(self) -> None: + def scan_and_delete(cursor: int) -> int: + cursor, keys_to_delete = self.redis_client.scan( + cursor, + f"{self.full_key_prefix}:*", + ) + self.redis_client.delete(*keys_to_delete) + return cursor + + cursor = scan_and_delete(0) + while cursor != 0: + cursor = scan_and_delete(cursor) + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class RedisEntityStore(BaseEntityStore): + """Redis-backed Entity store. + + Entities get a TTL of 1 day by default, and + that TTL is extended by 3 days every time the entity is read back.
+ """ + + redis_client: Any + session_id: str = "default" + key_prefix: str = "memory_store" + ttl: int | None = 60 * 60 * 24 + recall_ttl: int | None = 60 * 60 * 24 * 3 + + def __init__( + self, + session_id: str = "default", + url: str = "redis://localhost:6379/0", + key_prefix: str = "memory_store", + ttl: int | None = 60 * 60 * 24, + recall_ttl: int | None = 60 * 60 * 24 * 3, + *args: Any, + **kwargs: Any, + ): + """Initializes the RedisEntityStore. + + Args: + session_id: Unique identifier for the session. + url: URL of the Redis server. + key_prefix: Prefix for keys in the Redis store. + ttl: Time-to-live for keys in seconds (default 1 day). + recall_ttl: Time-to-live extension for keys when recalled (default 3 days). + *args: Additional positional arguments. + **kwargs: Additional keyword arguments. + """ + try: + import redis + except ImportError as e: + msg = ( + "Could not import redis python package. " + "Please install it with `pip install redis`." + ) + raise ImportError(msg) from e + + super().__init__(*args, **kwargs) + + try: + from langchain_community.utilities.redis import get_client + except ImportError as e: + msg = ( + "Could not import langchain_community.utilities.redis.get_client. " + "Please install it with `pip install langchain-community`." + ) + raise ImportError(msg) from e + + try: + self.redis_client = get_client(redis_url=url, decode_responses=True) + except redis.exceptions.ConnectionError: + logger.exception("Redis client could not connect") + + self.session_id = session_id + self.key_prefix = key_prefix + self.ttl = ttl + self.recall_ttl = recall_ttl or ttl + + @property + def full_key_prefix(self) -> str: + """Returns the full key prefix with session ID.""" + return f"{self.key_prefix}:{self.session_id}" + + @override + def get(self, key: str, default: str | None = None) -> str | None: + res = ( + self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl) + or default + or "" + ) + logger.debug("REDIS MEM get '%s:%s': '%s'", self.full_key_prefix, key, res) + return res + + @override + def set(self, key: str, value: str | None) -> None: + if not value: + return self.delete(key) + self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl) + logger.debug( + "REDIS MEM set '%s:%s': '%s' EX %s", + self.full_key_prefix, + key, + value, + self.ttl, + ) + return None + + @override + def delete(self, key: str) -> None: + self.redis_client.delete(f"{self.full_key_prefix}:{key}") + + @override + def exists(self, key: str) -> bool: + return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1 + + @override + def clear(self) -> None: + # iterate a list in batches of size batch_size + def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]: + iterator = iter(iterable) + while batch := list(islice(iterator, batch_size)): + yield batch + + for keybatch in batched( + self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), + 500, + ): + self.redis_client.delete(*keybatch) + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class SQLiteEntityStore(BaseEntityStore): + """SQLite-backed Entity store with safe query construction.""" + + session_id: str = "default" + table_name: str = "memory_store" + conn: Any = None + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + def __init__( + self, + session_id: str = "default", + db_file: str = "entities.db", + table_name: str = 
"memory_store", + *args: Any, + **kwargs: Any, + ): + """Initializes the SQLiteEntityStore. + + Args: + session_id: Unique identifier for the session. + db_file: Path to the SQLite database file. + table_name: Name of the table to store entities. + *args: Additional positional arguments. + **kwargs: Additional keyword arguments. + """ + super().__init__(*args, **kwargs) + try: + import sqlite3 + except ImportError as e: + msg = ( + "Could not import sqlite3 python package. " + "Please install it with `pip install sqlite3`." + ) + raise ImportError(msg) from e + + # Basic validation to prevent obviously malicious table/session names + if not table_name.isidentifier() or not session_id.isidentifier(): + # Since we validate here, we can safely suppress the S608 bandit warning + msg = "Table name and session ID must be valid Python identifiers." + raise ValueError(msg) + + self.conn = sqlite3.connect(db_file) + self.session_id = session_id + self.table_name = table_name + self._create_table_if_not_exists() + + @property + def full_table_name(self) -> str: + """Returns the full table name with session ID.""" + return f"{self.table_name}_{self.session_id}" + + def _execute_query(self, query: str, params: tuple = ()) -> "sqlite3.Cursor": + """Executes a query with proper connection handling.""" + with self.conn: + return self.conn.execute(query, params) + + def _create_table_if_not_exists(self) -> None: + """Creates the entity table if it doesn't exist, using safe quoting.""" + # Use standard SQL double quotes for the table name identifier + create_table_query = f""" + CREATE TABLE IF NOT EXISTS "{self.full_table_name}" ( + key TEXT PRIMARY KEY, + value TEXT + ) + """ + self._execute_query(create_table_query) + + def get(self, key: str, default: str | None = None) -> str | None: + """Retrieves a value, safely quoting the table name.""" + # `?` placeholder is used for the value to prevent SQL injection + # Ignore S608 since we validate for malicious table/session names in `__init__` + query = f'SELECT value FROM "{self.full_table_name}" WHERE key = ?' # noqa: S608 + cursor = self._execute_query(query, (key,)) + result = cursor.fetchone() + return result[0] if result is not None else default + + def set(self, key: str, value: str | None) -> None: + """Inserts or replaces a value, safely quoting the table name.""" + if not value: + return self.delete(key) + # Ignore S608 since we validate for malicious table/session names in `__init__` + query = ( + "INSERT OR REPLACE INTO " # noqa: S608 + f'"{self.full_table_name}" (key, value) VALUES (?, ?)' + ) + self._execute_query(query, (key, value)) + return None + + def delete(self, key: str) -> None: + """Deletes a key-value pair, safely quoting the table name.""" + # Ignore S608 since we validate for malicious table/session names in `__init__` + query = f'DELETE FROM "{self.full_table_name}" WHERE key = ?' # noqa: S608 + self._execute_query(query, (key,)) + + def exists(self, key: str) -> bool: + """Checks for the existence of a key, safely quoting the table name.""" + # Ignore S608 since we validate for malicious table/session names in `__init__` + query = f'SELECT 1 FROM "{self.full_table_name}" WHERE key = ? 
LIMIT 1' # noqa: S608 + cursor = self._execute_query(query, (key,)) + return cursor.fetchone() is not None + + @override + def clear(self) -> None: + # Ignore S608 since we validate for malicious table/session names in `__init__` + query = f""" + DELETE FROM "{self.full_table_name}" + """ # noqa: S608 + with self.conn: + self.conn.execute(query) + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class ConversationEntityMemory(BaseChatMemory): + """Entity extractor & summarizer memory. + + Extracts named entities from the recent chat history and generates summaries. + With a swappable entity store, entities can be persisted across conversations. + Defaults to an in-memory entity store, and can be swapped out for a Redis, + SQLite, or other entity store. + """ + + human_prefix: str = "Human" + ai_prefix: str = "AI" + llm: BaseLanguageModel + entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT + entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT + + # Cache of recently detected entity names, if any + # It is updated when load_memory_variables is called: + entity_cache: list[str] = [] + + # Number of recent message pairs to consider when updating entities: + k: int = 3 + + chat_history_key: str = "history" + + # Store to manage entity-related data: + entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore) + + @property + def buffer(self) -> list[BaseMessage]: + """Access chat memory messages.""" + return self.chat_memory.messages + + @property + def memory_variables(self) -> list[str]: + """Will always return list of memory variables. + + :meta private: + """ + return ["entities", self.chat_history_key] + + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Load memory variables. + + Returns chat history and all generated entities with summaries if available, + and updates or clears the recent entity cache. + + New entity names may be found when calling this method, before their + summaries are generated, so the entity cache values may be empty if no entity + descriptions have been generated yet. + """ + # Create an LLMChain for predicting entity names from the recent chat history: + chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) + + if self.input_key is None: + prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) + else: + prompt_input_key = self.input_key + + # Extract an arbitrary window of the last message pairs from + # the chat history, where the hyperparameter k is the + # number of message pairs: + buffer_string = get_buffer_string( + self.buffer[-self.k * 2 :], + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + + # Generates a comma-separated list of named entities, + # e.g. "Jane, White House, UFO" + # or "NONE" if no named entities are extracted: + output = chain.predict( + history=buffer_string, + input=inputs[prompt_input_key], + ) + + # If no named entities are extracted, assigns an empty list.
+ if output.strip() == "NONE": + entities = [] + else: + # Make a list of the extracted entities: + entities = [w.strip() for w in output.split(",")] + + # Make a dictionary of entities with summary if exists: + entity_summaries = {} + + for entity in entities: + entity_summaries[entity] = self.entity_store.get(entity, "") + + # Replaces the entity name cache with the most recently discussed entities, + # or if no entities were extracted, clears the cache: + self.entity_cache = entities + + # Should we return as message objects or as a string? + if self.return_messages: + # Get last `k` pair of chat messages: + buffer: Any = self.buffer[-self.k * 2 :] + else: + # Reuse the string we made earlier: + buffer = buffer_string + + return { + self.chat_history_key: buffer, + "entities": entity_summaries, + } + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Save context from this conversation history to the entity store. + + Generates a summary for each entity in the entity cache by prompting + the model, and saves these summaries to the entity store. + """ + super().save_context(inputs, outputs) + + if self.input_key is None: + prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) + else: + prompt_input_key = self.input_key + + # Extract an arbitrary window of the last message pairs from + # the chat history, where the hyperparameter k is the + # number of message pairs: + buffer_string = get_buffer_string( + self.buffer[-self.k * 2 :], + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + + input_data = inputs[prompt_input_key] + + # Create an LLMChain for predicting entity summarization from the context + chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt) + + # Generate new summaries for entities and save them in the entity store + for entity in self.entity_cache: + # Get existing summary if it exists + existing_summary = self.entity_store.get(entity, "") + output = chain.predict( + summary=existing_summary, + entity=entity, + history=buffer_string, + input=input_data, + ) + # Save the updated summary to the entity store + self.entity_store.set(entity, output.strip()) + + def clear(self) -> None: + """Clear memory contents.""" + self.chat_memory.clear() + self.entity_cache.clear() + self.entity_store.clear() diff --git a/libs/langchain/langchain_classic/memory/summary.py b/libs/langchain/langchain_classic/memory/summary.py new file mode 100644 index 0000000000000..5b2ed54e56a70 --- /dev/null +++ b/libs/langchain/langchain_classic/memory/summary.py @@ -0,0 +1,171 @@ +from __future__ import annotations + +from typing import Any + +from langchain_core._api import deprecated +from langchain_core.chat_history import BaseChatMessageHistory +from langchain_core.language_models import BaseLanguageModel +from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string +from langchain_core.prompts import BasePromptTemplate +from langchain_core.utils import pre_init +from pydantic import BaseModel +from typing_extensions import override + +from langchain_classic.chains.llm import LLMChain +from langchain_classic.memory.chat_memory import BaseChatMemory +from langchain_classic.memory.prompt import SUMMARY_PROMPT + + +@deprecated( + since="0.2.12", + removal="1.0", + message=( + "Refer here for how to incorporate summaries of conversation history: " + "https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/" + ), +) +class SummarizerMixin(BaseModel): + """Mixin for 
summarizer.""" + + human_prefix: str = "Human" + ai_prefix: str = "AI" + llm: BaseLanguageModel + prompt: BasePromptTemplate = SUMMARY_PROMPT + summary_message_cls: type[BaseMessage] = SystemMessage + + def predict_new_summary( + self, + messages: list[BaseMessage], + existing_summary: str, + ) -> str: + """Predict a new summary based on the messages and existing summary. + + Args: + messages: List of messages to summarize. + existing_summary: Existing summary to build upon. + + Returns: + A new summary string. + """ + new_lines = get_buffer_string( + messages, + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + + chain = LLMChain(llm=self.llm, prompt=self.prompt) + return chain.predict(summary=existing_summary, new_lines=new_lines) + + async def apredict_new_summary( + self, + messages: list[BaseMessage], + existing_summary: str, + ) -> str: + """Predict a new summary based on the messages and existing summary. + + Args: + messages: List of messages to summarize. + existing_summary: Existing summary to build upon. + + Returns: + A new summary string. + """ + new_lines = get_buffer_string( + messages, + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + + chain = LLMChain(llm=self.llm, prompt=self.prompt) + return await chain.apredict(summary=existing_summary, new_lines=new_lines) + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin): + """Continually summarizes the conversation history. + + The summary is updated after each conversation turn. + The implementations returns a summary of the conversation history which + can be used to provide context to the model. + """ + + buffer: str = "" + memory_key: str = "history" #: :meta private: + + @classmethod + def from_messages( + cls, + llm: BaseLanguageModel, + chat_memory: BaseChatMessageHistory, + *, + summarize_step: int = 2, + **kwargs: Any, + ) -> ConversationSummaryMemory: + """Create a ConversationSummaryMemory from a list of messages. + + Args: + llm: The language model to use for summarization. + chat_memory: The chat history to summarize. + summarize_step: Number of messages to summarize at a time. + **kwargs: Additional keyword arguments to pass to the class. + + Returns: + An instance of ConversationSummaryMemory with the summarized history. + """ + obj = cls(llm=llm, chat_memory=chat_memory, **kwargs) + for i in range(0, len(obj.chat_memory.messages), summarize_step): + obj.buffer = obj.predict_new_summary( + obj.chat_memory.messages[i : i + summarize_step], + obj.buffer, + ) + return obj + + @property + def memory_variables(self) -> list[str]: + """Will always return list of memory variables. + + :meta private: + """ + return [self.memory_key] + + @override + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Return history buffer.""" + if self.return_messages: + buffer: Any = [self.summary_message_cls(content=self.buffer)] + else: + buffer = self.buffer + return {self.memory_key: buffer} + + @pre_init + def validate_prompt_input_variables(cls, values: dict) -> dict: + """Validate that prompt input variables are consistent.""" + prompt_variables = values["prompt"].input_variables + expected_keys = {"summary", "new_lines"} + if expected_keys != set(prompt_variables): + msg = ( + "Got unexpected prompt input variables. 
The prompt expects " + f"{prompt_variables}, but it should have {expected_keys}." + ) + raise ValueError(msg) + return values + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Save context from this conversation to buffer.""" + super().save_context(inputs, outputs) + self.buffer = self.predict_new_summary( + self.chat_memory.messages[-2:], + self.buffer, + ) + + def clear(self) -> None: + """Clear memory contents.""" + super().clear() + self.buffer = "" diff --git a/libs/langchain/langchain_classic/memory/summary_buffer.py b/libs/langchain/langchain_classic/memory/summary_buffer.py new file mode 100644 index 0000000000000..fffcceb27bb4f --- /dev/null +++ b/libs/langchain/langchain_classic/memory/summary_buffer.py @@ -0,0 +1,151 @@ +from typing import Any + +from langchain_core._api import deprecated +from langchain_core.messages import BaseMessage, get_buffer_string +from langchain_core.utils import pre_init +from typing_extensions import override + +from langchain_classic.memory.chat_memory import BaseChatMemory +from langchain_classic.memory.summary import SummarizerMixin + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin): + """Buffer with summarizer for storing conversation memory. + + Provides a running summary of the conversation together with the most recent + messages in the conversation under the constraint that the total number of + tokens in the conversation does not exceed a certain limit. + """ + + max_token_limit: int = 2000 + moving_summary_buffer: str = "" + memory_key: str = "history" + + @property + def buffer(self) -> str | list[BaseMessage]: + """String buffer of memory.""" + return self.load_memory_variables({})[self.memory_key] + + async def abuffer(self) -> str | list[BaseMessage]: + """Async memory buffer.""" + memory_variables = await self.aload_memory_variables({}) + return memory_variables[self.memory_key] + + @property + def memory_variables(self) -> list[str]: + """Will always return list of memory variables. 
+ + :meta private: + """ + return [self.memory_key] + + @override + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Return history buffer.""" + buffer = self.chat_memory.messages + if self.moving_summary_buffer != "": + first_messages: list[BaseMessage] = [ + self.summary_message_cls(content=self.moving_summary_buffer), + ] + buffer = first_messages + buffer + if self.return_messages: + final_buffer: Any = buffer + else: + final_buffer = get_buffer_string( + buffer, + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + return {self.memory_key: final_buffer} + + @override + async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Asynchronously return key-value pairs given the text input to the chain.""" + buffer = await self.chat_memory.aget_messages() + if self.moving_summary_buffer != "": + first_messages: list[BaseMessage] = [ + self.summary_message_cls(content=self.moving_summary_buffer), + ] + buffer = first_messages + buffer + if self.return_messages: + final_buffer: Any = buffer + else: + final_buffer = get_buffer_string( + buffer, + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + return {self.memory_key: final_buffer} + + @pre_init + def validate_prompt_input_variables(cls, values: dict) -> dict: + """Validate that prompt input variables are consistent.""" + prompt_variables = values["prompt"].input_variables + expected_keys = {"summary", "new_lines"} + if expected_keys != set(prompt_variables): + msg = ( + "Got unexpected prompt input variables. The prompt expects " + f"{prompt_variables}, but it should have {expected_keys}." + ) + raise ValueError(msg) + return values + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Save context from this conversation to buffer.""" + super().save_context(inputs, outputs) + self.prune() + + async def asave_context( + self, + inputs: dict[str, Any], + outputs: dict[str, str], + ) -> None: + """Asynchronously save context from this conversation to buffer.""" + await super().asave_context(inputs, outputs) + await self.aprune() + + def prune(self) -> None: + """Prune buffer if it exceeds max token limit.""" + buffer = self.chat_memory.messages + curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) + if curr_buffer_length > self.max_token_limit: + pruned_memory = [] + while curr_buffer_length > self.max_token_limit: + pruned_memory.append(buffer.pop(0)) + curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) + self.moving_summary_buffer = self.predict_new_summary( + pruned_memory, + self.moving_summary_buffer, + ) + + async def aprune(self) -> None: + """Asynchronously prune buffer if it exceeds max token limit.""" + buffer = self.chat_memory.messages + curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) + if curr_buffer_length > self.max_token_limit: + pruned_memory = [] + while curr_buffer_length > self.max_token_limit: + pruned_memory.append(buffer.pop(0)) + curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) + self.moving_summary_buffer = await self.apredict_new_summary( + pruned_memory, + self.moving_summary_buffer, + ) + + def clear(self) -> None: + """Clear memory contents.""" + super().clear() + self.moving_summary_buffer = "" + + async def aclear(self) -> None: + """Asynchronously clear memory contents.""" + await super().aclear() + self.moving_summary_buffer = "" diff --git a/libs/langchain/langchain_classic/memory/token_buffer.py 
b/libs/langchain/langchain_classic/memory/token_buffer.py new file mode 100644 index 0000000000000..caa0f78bcefad --- /dev/null +++ b/libs/langchain/langchain_classic/memory/token_buffer.py @@ -0,0 +1,74 @@ +from typing import Any + +from langchain_core._api import deprecated +from langchain_core.language_models import BaseLanguageModel +from langchain_core.messages import BaseMessage, get_buffer_string +from typing_extensions import override + +from langchain_classic.memory.chat_memory import BaseChatMemory + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class ConversationTokenBufferMemory(BaseChatMemory): + """Conversation chat memory with token limit. + + Keeps only the most recent messages in the conversation under the constraint + that the total number of tokens in the conversation does not exceed a certain limit. + """ + + human_prefix: str = "Human" + ai_prefix: str = "AI" + llm: BaseLanguageModel + memory_key: str = "history" + max_token_limit: int = 2000 + + @property + def buffer(self) -> Any: + """String buffer of memory.""" + return self.buffer_as_messages if self.return_messages else self.buffer_as_str + + @property + def buffer_as_str(self) -> str: + """Exposes the buffer as a string in case return_messages is False.""" + return get_buffer_string( + self.chat_memory.messages, + human_prefix=self.human_prefix, + ai_prefix=self.ai_prefix, + ) + + @property + def buffer_as_messages(self) -> list[BaseMessage]: + """Exposes the buffer as a list of messages in case return_messages is True.""" + return self.chat_memory.messages + + @property + def memory_variables(self) -> list[str]: + """Will always return list of memory variables. + + :meta private: + """ + return [self.memory_key] + + @override + def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Return history buffer.""" + return {self.memory_key: self.buffer} + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Save context from this conversation to buffer, pruning it if needed.""" + super().save_context(inputs, outputs) + # Prune buffer if it exceeds max token limit + buffer = self.chat_memory.messages + curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) + if curr_buffer_length > self.max_token_limit: + pruned_memory = [] + while curr_buffer_length > self.max_token_limit: + pruned_memory.append(buffer.pop(0)) + curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) diff --git a/libs/langchain/langchain_classic/memory/vectorstore.py b/libs/langchain/langchain_classic/memory/vectorstore.py new file mode 100644 index 0000000000000..ce09cb3a3aa01 --- /dev/null +++ b/libs/langchain/langchain_classic/memory/vectorstore.py @@ -0,0 +1,122 @@ +"""Class for a VectorStore-backed memory object.""" + +from collections.abc import Sequence +from typing import Any + +from langchain_core._api import deprecated +from langchain_core.documents import Document +from langchain_core.memory import BaseMemory +from langchain_core.vectorstores import VectorStoreRetriever +from pydantic import Field + +from langchain_classic.memory.utils import get_prompt_input_key + + +@deprecated( + since="0.3.1", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) +class VectorStoreRetrieverMemory(BaseMemory): + """Vector Store Retriever Memory.
+ + Stores the conversation history in a vector store and retrieves the relevant + parts of past conversation based on the input. + """ + + retriever: VectorStoreRetriever = Field(exclude=True) + """VectorStoreRetriever object to connect to.""" + + memory_key: str = "history" #: :meta private: + """Key name to locate the memories in the result of load_memory_variables.""" + + input_key: str | None = None + """Key name to index the inputs to load_memory_variables.""" + + return_docs: bool = False + """Whether or not to return the result of querying the database directly.""" + + exclude_input_keys: Sequence[str] = Field(default_factory=tuple) + """Input keys to exclude in addition to memory key when constructing the document.""" + + @property + def memory_variables(self) -> list[str]: + """The list of keys emitted from the load_memory_variables method.""" + return [self.memory_key] + + def _get_prompt_input_key(self, inputs: dict[str, Any]) -> str: + """Get the input key for the prompt.""" + if self.input_key is None: + return get_prompt_input_key(inputs, self.memory_variables) + return self.input_key + + def _documents_to_memory_variables( + self, + docs: list[Document], + ) -> dict[str, list[Document] | str]: + result: list[Document] | str + if not self.return_docs: + result = "\n".join([doc.page_content for doc in docs]) + else: + result = docs + return {self.memory_key: result} + + def load_memory_variables( + self, + inputs: dict[str, Any], + ) -> dict[str, list[Document] | str]: + """Return history buffer.""" + input_key = self._get_prompt_input_key(inputs) + query = inputs[input_key] + docs = self.retriever.invoke(query) + return self._documents_to_memory_variables(docs) + + async def aload_memory_variables( + self, + inputs: dict[str, Any], + ) -> dict[str, list[Document] | str]: + """Return history buffer.""" + input_key = self._get_prompt_input_key(inputs) + query = inputs[input_key] + docs = await self.retriever.ainvoke(query) + return self._documents_to_memory_variables(docs) + + def _form_documents( + self, + inputs: dict[str, Any], + outputs: dict[str, str], + ) -> list[Document]: + """Format context from this conversation to buffer.""" + # Each document should only include the current turn, not the chat history + exclude = set(self.exclude_input_keys) + exclude.add(self.memory_key) + filtered_inputs = {k: v for k, v in inputs.items() if k not in exclude} + texts = [ + f"{k}: {v}" + for k, v in list(filtered_inputs.items()) + list(outputs.items()) + ] + page_content = "\n".join(texts) + return [Document(page_content=page_content)] + + def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + """Save context from this conversation to buffer.""" + documents = self._form_documents(inputs, outputs) + self.retriever.add_documents(documents) + + async def asave_context( + self, + inputs: dict[str, Any], + outputs: dict[str, str], + ) -> None: + """Save context from this conversation to buffer.""" + documents = self._form_documents(inputs, outputs) + await self.retriever.aadd_documents(documents) + + def clear(self) -> None: + """Nothing to clear.""" + + async def aclear(self) -> None: + """Nothing to clear.""" From 2ff685f353c44aef6d46e1a061beedb2af4d3a74 Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Wed, 15 Oct 2025 12:30:17 -0400 Subject: [PATCH 4/8] move BaseMemory to langchain-classic --- .../memory.py => langchain/langchain_classic/memory/base.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename libs/{core/langchain_core/memory.py =>
langchain/langchain_classic/memory/base.py} (100%) diff --git a/libs/core/langchain_core/memory.py b/libs/langchain/langchain_classic/memory/base.py similarity index 100% rename from libs/core/langchain_core/memory.py rename to libs/langchain/langchain_classic/memory/base.py From 68aa9123a26c1de259816a218aa1da1b23286214 Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Wed, 15 Oct 2025 12:30:32 -0400 Subject: [PATCH 5/8] un-deprecate --- libs/langchain/langchain_classic/memory/base.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/libs/langchain/langchain_classic/memory/base.py b/libs/langchain/langchain_classic/memory/base.py index ec50cc01e0725..c3316d4d95df3 100644 --- a/libs/langchain/langchain_classic/memory/base.py +++ b/libs/langchain/langchain_classic/memory/base.py @@ -12,19 +12,10 @@ from pydantic import ConfigDict -from langchain_core._api import deprecated from langchain_core.load.serializable import Serializable from langchain_core.runnables import run_in_executor -@deprecated( - since="0.3.3", - removal="1.0.0", - message=( - "Please see the migration guide at: " - "https://python.langchain.com/docs/versions/migrating_memory/" - ), -) class BaseMemory(Serializable, ABC): """Abstract base class for memory in Chains. From 6cdda859d4aa3fd8f7c53ae80920c875e79745e9 Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Wed, 15 Oct 2025 12:42:09 -0400 Subject: [PATCH 6/8] update imports --- .../conversational_retrieval/openai_functions.py | 2 +- libs/langchain/langchain_classic/chains/base.py | 2 +- libs/langchain/langchain_classic/chains/conversation/base.py | 2 +- libs/langchain/langchain_classic/memory/base.py | 3 +-- libs/langchain/langchain_classic/memory/buffer.py | 2 +- libs/langchain/langchain_classic/memory/chat_memory.py | 2 +- libs/langchain/langchain_classic/memory/combined.py | 2 +- libs/langchain/langchain_classic/memory/readonly.py | 2 +- libs/langchain/langchain_classic/memory/simple.py | 3 ++- libs/langchain/langchain_classic/memory/vectorstore.py | 2 +- libs/langchain/langchain_classic/schema/__init__.py | 3 ++- libs/langchain/langchain_classic/schema/memory.py | 2 +- libs/langchain/tests/unit_tests/chains/test_base.py | 2 +- libs/langchain/tests/unit_tests/chains/test_conversation.py | 2 +- libs/langchain/tests/unit_tests/chains/test_memory.py | 2 +- 15 files changed, 17 insertions(+), 16 deletions(-) diff --git a/libs/langchain/langchain_classic/agents/agent_toolkits/conversational_retrieval/openai_functions.py b/libs/langchain/langchain_classic/agents/agent_toolkits/conversational_retrieval/openai_functions.py index e95a233ec2afb..9d6f0f1c2e43d 100644 --- a/libs/langchain/langchain_classic/agents/agent_toolkits/conversational_retrieval/openai_functions.py +++ b/libs/langchain/langchain_classic/agents/agent_toolkits/conversational_retrieval/openai_functions.py @@ -1,7 +1,6 @@ from typing import Any from langchain_core.language_models import BaseLanguageModel -from langchain_core.memory import BaseMemory from langchain_core.messages import SystemMessage from langchain_core.prompts.chat import MessagesPlaceholder from langchain_core.tools import BaseTool @@ -11,6 +10,7 @@ AgentTokenBufferMemory, ) from langchain_classic.agents.openai_functions_agent.base import OpenAIFunctionsAgent +from langchain_classic.memory.base import BaseMemory from langchain_classic.memory.token_buffer import ConversationTokenBufferMemory diff --git a/libs/langchain/langchain_classic/chains/base.py b/libs/langchain/langchain_classic/chains/base.py index 6b9934a7912dd..c7b63ce320991 
100644 --- a/libs/langchain/langchain_classic/chains/base.py +++ b/libs/langchain/langchain_classic/chains/base.py @@ -20,7 +20,6 @@ CallbackManagerForChainRun, Callbacks, ) -from langchain_core.memory import BaseMemory from langchain_core.outputs import RunInfo from langchain_core.runnables import ( RunnableConfig, @@ -38,6 +37,7 @@ ) from typing_extensions import override +from langchain_classic.memory.base import BaseMemory from langchain_classic.schema import RUN_KEY logger = logging.getLogger(__name__) diff --git a/libs/langchain/langchain_classic/chains/conversation/base.py b/libs/langchain/langchain_classic/chains/conversation/base.py index f1c403787deb0..26b2728d32743 100644 --- a/libs/langchain/langchain_classic/chains/conversation/base.py +++ b/libs/langchain/langchain_classic/chains/conversation/base.py @@ -1,13 +1,13 @@ """Chain that carries on a conversation and calls an LLM.""" from langchain_core._api import deprecated -from langchain_core.memory import BaseMemory from langchain_core.prompts import BasePromptTemplate from pydantic import ConfigDict, Field, model_validator from typing_extensions import Self, override from langchain_classic.chains.conversation.prompt import PROMPT from langchain_classic.chains.llm import LLMChain +from langchain_classic.memory.base import BaseMemory from langchain_classic.memory.buffer import ConversationBufferMemory diff --git a/libs/langchain/langchain_classic/memory/base.py b/libs/langchain/langchain_classic/memory/base.py index c3316d4d95df3..2b3c705a23421 100644 --- a/libs/langchain/langchain_classic/memory/base.py +++ b/libs/langchain/langchain_classic/memory/base.py @@ -10,10 +10,9 @@ from abc import ABC, abstractmethod from typing import Any -from pydantic import ConfigDict - from langchain_core.load.serializable import Serializable from langchain_core.runnables import run_in_executor +from pydantic import ConfigDict class BaseMemory(Serializable, ABC): diff --git a/libs/langchain/langchain_classic/memory/buffer.py b/libs/langchain/langchain_classic/memory/buffer.py index c356b70da08c3..a328458854d74 100644 --- a/libs/langchain/langchain_classic/memory/buffer.py +++ b/libs/langchain/langchain_classic/memory/buffer.py @@ -1,11 +1,11 @@ from typing import Any from langchain_core._api import deprecated -from langchain_core.memory import BaseMemory from langchain_core.messages import BaseMessage, get_buffer_string from langchain_core.utils import pre_init from typing_extensions import override +from langchain_classic.memory.base import BaseMemory from langchain_classic.memory.chat_memory import BaseChatMemory from langchain_classic.memory.utils import get_prompt_input_key diff --git a/libs/langchain/langchain_classic/memory/chat_memory.py b/libs/langchain/langchain_classic/memory/chat_memory.py index 5a86a78024e30..d624a42997366 100644 --- a/libs/langchain/langchain_classic/memory/chat_memory.py +++ b/libs/langchain/langchain_classic/memory/chat_memory.py @@ -7,10 +7,10 @@ BaseChatMessageHistory, InMemoryChatMessageHistory, ) -from langchain_core.memory import BaseMemory from langchain_core.messages import AIMessage, HumanMessage from pydantic import Field +from langchain_classic.memory.base import BaseMemory from langchain_classic.memory.utils import get_prompt_input_key diff --git a/libs/langchain/langchain_classic/memory/combined.py b/libs/langchain/langchain_classic/memory/combined.py index b19c97edec17b..be60214d1005d 100644 --- a/libs/langchain/langchain_classic/memory/combined.py +++ 
b/libs/langchain/langchain_classic/memory/combined.py @@ -1,9 +1,9 @@ import warnings from typing import Any -from langchain_core.memory import BaseMemory from pydantic import field_validator +from langchain_classic.memory.base import BaseMemory from langchain_classic.memory.chat_memory import BaseChatMemory diff --git a/libs/langchain/langchain_classic/memory/readonly.py b/libs/langchain/langchain_classic/memory/readonly.py index 42206123160d8..450ddeca6e02d 100644 --- a/libs/langchain/langchain_classic/memory/readonly.py +++ b/libs/langchain/langchain_classic/memory/readonly.py @@ -1,6 +1,6 @@ from typing import Any -from langchain_core.memory import BaseMemory +from langchain_classic.memory.base import BaseMemory class ReadOnlySharedMemory(BaseMemory): diff --git a/libs/langchain/langchain_classic/memory/simple.py b/libs/langchain/langchain_classic/memory/simple.py index 61fb2b273010d..23dad908d35ef 100644 --- a/libs/langchain/langchain_classic/memory/simple.py +++ b/libs/langchain/langchain_classic/memory/simple.py @@ -1,8 +1,9 @@ from typing import Any -from langchain_core.memory import BaseMemory from typing_extensions import override +from langchain_classic.memory.base import BaseMemory + class SimpleMemory(BaseMemory): """Simple Memory. diff --git a/libs/langchain/langchain_classic/memory/vectorstore.py b/libs/langchain/langchain_classic/memory/vectorstore.py index ce09cb3a3aa01..635bf4d4a94db 100644 --- a/libs/langchain/langchain_classic/memory/vectorstore.py +++ b/libs/langchain/langchain_classic/memory/vectorstore.py @@ -5,10 +5,10 @@ from langchain_core._api import deprecated from langchain_core.documents import Document -from langchain_core.memory import BaseMemory from langchain_core.vectorstores import VectorStoreRetriever from pydantic import Field +from langchain_classic.memory.base import BaseMemory from langchain_classic.memory.utils import get_prompt_input_key diff --git a/libs/langchain/langchain_classic/schema/__init__.py b/libs/langchain/langchain_classic/schema/__init__.py index 6c8c4f61a0a62..d3e06e9496e34 100644 --- a/libs/langchain/langchain_classic/schema/__init__.py +++ b/libs/langchain/langchain_classic/schema/__init__.py @@ -5,7 +5,6 @@ from langchain_core.chat_history import BaseChatMessageHistory from langchain_core.documents import BaseDocumentTransformer, Document from langchain_core.exceptions import LangChainException, OutputParserException -from langchain_core.memory import BaseMemory from langchain_core.messages import ( AIMessage, BaseMessage, @@ -36,6 +35,8 @@ from langchain_core.retrievers import BaseRetriever from langchain_core.stores import BaseStore +from langchain_classic.memory.base import BaseMemory + RUN_KEY = "__run" # Backwards compatibility. 
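For downstream code, the net effect of this patch is an import-path change only: `BaseMemory` now resolves to `langchain_classic.memory.base`, and the memory classes subclass that copy, so runtime behavior is unchanged. A minimal sketch of the import surface at this point in the series (illustrative only; it assumes a `langchain-classic` build with the layout shown above, and note that PATCH 8/8 below relocates the module once more):

    # Import paths as of PATCH 6/8; see PATCH 8/8 for the final location.
    from langchain_classic.memory.base import BaseMemory
    from langchain_classic.memory.buffer import ConversationBufferMemory

    # The memory classes subclass the relocated BaseMemory, so existing
    # isinstance checks keep working (the deprecation decorator still fires
    # when the class is used).
    memory = ConversationBufferMemory()
    assert isinstance(memory, BaseMemory)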
diff --git a/libs/langchain/langchain_classic/schema/memory.py b/libs/langchain/langchain_classic/schema/memory.py index d2f3d73e6138e..1012628300ea1 100644 --- a/libs/langchain/langchain_classic/schema/memory.py +++ b/libs/langchain/langchain_classic/schema/memory.py @@ -1,3 +1,3 @@ -from langchain_core.memory import BaseMemory +from langchain_classic.memory.base import BaseMemory __all__ = ["BaseMemory"] diff --git a/libs/langchain/tests/unit_tests/chains/test_base.py b/libs/langchain/tests/unit_tests/chains/test_base.py index a0e21fab2b682..7ed51d17b9cb1 100644 --- a/libs/langchain/tests/unit_tests/chains/test_base.py +++ b/libs/langchain/tests/unit_tests/chains/test_base.py @@ -6,11 +6,11 @@ import pytest from langchain_core.callbacks.manager import CallbackManagerForChainRun -from langchain_core.memory import BaseMemory from langchain_core.tracers.context import collect_runs from typing_extensions import override from langchain_classic.chains.base import Chain +from langchain_classic.memory.base import BaseMemory from langchain_classic.schema import RUN_KEY from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler diff --git a/libs/langchain/tests/unit_tests/chains/test_conversation.py b/libs/langchain/tests/unit_tests/chains/test_conversation.py index 0913204b77e29..45d19744a3a94 100644 --- a/libs/langchain/tests/unit_tests/chains/test_conversation.py +++ b/libs/langchain/tests/unit_tests/chains/test_conversation.py @@ -6,11 +6,11 @@ import pytest from langchain_core.callbacks import CallbackManagerForLLMRun from langchain_core.language_models import LLM -from langchain_core.memory import BaseMemory from langchain_core.prompts.prompt import PromptTemplate from typing_extensions import override from langchain_classic.chains.conversation.base import ConversationChain +from langchain_classic.memory.base import BaseMemory from langchain_classic.memory.buffer import ConversationBufferMemory from langchain_classic.memory.buffer_window import ConversationBufferWindowMemory from langchain_classic.memory.summary import ConversationSummaryMemory diff --git a/libs/langchain/tests/unit_tests/chains/test_memory.py b/libs/langchain/tests/unit_tests/chains/test_memory.py index 2be4384c39769..0471c08567ab7 100644 --- a/libs/langchain/tests/unit_tests/chains/test_memory.py +++ b/libs/langchain/tests/unit_tests/chains/test_memory.py @@ -1,5 +1,4 @@ import pytest -from langchain_core.memory import BaseMemory from langchain_classic.chains.conversation.memory import ( ConversationBufferMemory, @@ -7,6 +6,7 @@ ConversationSummaryMemory, ) from langchain_classic.memory import ReadOnlySharedMemory, SimpleMemory +from langchain_classic.memory.base import BaseMemory from tests.unit_tests.llms.fake_llm import FakeLLM From 03c7ba3fa33aa12370a85d8738d2020ac2875f63 Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Wed, 15 Oct 2025 12:46:19 -0400 Subject: [PATCH 7/8] add back deprecation --- libs/langchain/langchain_classic/memory/base.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/libs/langchain/langchain_classic/memory/base.py b/libs/langchain/langchain_classic/memory/base.py index 2b3c705a23421..eb178bb888cd7 100644 --- a/libs/langchain/langchain_classic/memory/base.py +++ b/libs/langchain/langchain_classic/memory/base.py @@ -10,11 +10,20 @@ from abc import ABC, abstractmethod from typing import Any +from langchain_core._api import deprecated from langchain_core.load.serializable import Serializable from langchain_core.runnables import run_in_executor from pydantic import 
ConfigDict +@deprecated( + since="0.3.3", + removal="1.0.0", + message=( + "Please see the migration guide at: " + "https://python.langchain.com/docs/versions/migrating_memory/" + ), +) class BaseMemory(Serializable, ABC): """Abstract base class for memory in Chains. From b2af88e948518bb4d7f2528f054c2e12e201a608 Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Wed, 15 Oct 2025 12:52:30 -0400 Subject: [PATCH 8/8] move module to avoid circular import --- .../agent_toolkits/conversational_retrieval/openai_functions.py | 2 +- .../langchain_classic/{memory/base.py => base_memory.py} | 0 libs/langchain/langchain_classic/chains/base.py | 2 +- libs/langchain/langchain_classic/chains/conversation/base.py | 2 +- libs/langchain/langchain_classic/memory/buffer.py | 2 +- libs/langchain/langchain_classic/memory/chat_memory.py | 2 +- libs/langchain/langchain_classic/memory/combined.py | 2 +- libs/langchain/langchain_classic/memory/readonly.py | 2 +- libs/langchain/langchain_classic/memory/simple.py | 2 +- libs/langchain/langchain_classic/memory/vectorstore.py | 2 +- libs/langchain/langchain_classic/schema/__init__.py | 2 +- libs/langchain/langchain_classic/schema/memory.py | 2 +- libs/langchain/tests/unit_tests/chains/test_base.py | 2 +- libs/langchain/tests/unit_tests/chains/test_conversation.py | 2 +- libs/langchain/tests/unit_tests/chains/test_memory.py | 2 +- 15 files changed, 14 insertions(+), 14 deletions(-) rename libs/langchain/langchain_classic/{memory/base.py => base_memory.py} (100%) diff --git a/libs/langchain/langchain_classic/agents/agent_toolkits/conversational_retrieval/openai_functions.py b/libs/langchain/langchain_classic/agents/agent_toolkits/conversational_retrieval/openai_functions.py index 9d6f0f1c2e43d..5b8ddfc6e1ac8 100644 --- a/libs/langchain/langchain_classic/agents/agent_toolkits/conversational_retrieval/openai_functions.py +++ b/libs/langchain/langchain_classic/agents/agent_toolkits/conversational_retrieval/openai_functions.py @@ -10,7 +10,7 @@ AgentTokenBufferMemory, ) from langchain_classic.agents.openai_functions_agent.base import OpenAIFunctionsAgent -from langchain_classic.memory.base import BaseMemory +from langchain_classic.base_memory import BaseMemory from langchain_classic.memory.token_buffer import ConversationTokenBufferMemory diff --git a/libs/langchain/langchain_classic/memory/base.py b/libs/langchain/langchain_classic/base_memory.py similarity index 100% rename from libs/langchain/langchain_classic/memory/base.py rename to libs/langchain/langchain_classic/base_memory.py diff --git a/libs/langchain/langchain_classic/chains/base.py b/libs/langchain/langchain_classic/chains/base.py index c7b63ce320991..df2d4b4c3eb1e 100644 --- a/libs/langchain/langchain_classic/chains/base.py +++ b/libs/langchain/langchain_classic/chains/base.py @@ -37,7 +37,7 @@ ) from typing_extensions import override -from langchain_classic.memory.base import BaseMemory +from langchain_classic.base_memory import BaseMemory from langchain_classic.schema import RUN_KEY logger = logging.getLogger(__name__) diff --git a/libs/langchain/langchain_classic/chains/conversation/base.py b/libs/langchain/langchain_classic/chains/conversation/base.py index 26b2728d32743..98bc9a4f121d0 100644 --- a/libs/langchain/langchain_classic/chains/conversation/base.py +++ b/libs/langchain/langchain_classic/chains/conversation/base.py @@ -5,9 +5,9 @@ from pydantic import ConfigDict, Field, model_validator from typing_extensions import Self, override +from langchain_classic.base_memory import BaseMemory from 
---
 .../agent_toolkits/conversational_retrieval/openai_functions.py | 2 +-
 .../langchain_classic/{memory/base.py => base_memory.py}        | 0
 libs/langchain/langchain_classic/chains/base.py                 | 2 +-
 libs/langchain/langchain_classic/chains/conversation/base.py    | 2 +-
 libs/langchain/langchain_classic/memory/buffer.py               | 2 +-
 libs/langchain/langchain_classic/memory/chat_memory.py          | 2 +-
 libs/langchain/langchain_classic/memory/combined.py             | 2 +-
 libs/langchain/langchain_classic/memory/readonly.py             | 2 +-
 libs/langchain/langchain_classic/memory/simple.py               | 2 +-
 libs/langchain/langchain_classic/memory/vectorstore.py          | 2 +-
 libs/langchain/langchain_classic/schema/__init__.py             | 2 +-
 libs/langchain/langchain_classic/schema/memory.py               | 2 +-
 libs/langchain/tests/unit_tests/chains/test_base.py             | 2 +-
 libs/langchain/tests/unit_tests/chains/test_conversation.py     | 2 +-
 libs/langchain/tests/unit_tests/chains/test_memory.py           | 2 +-
 15 files changed, 14 insertions(+), 14 deletions(-)
 rename libs/langchain/langchain_classic/{memory/base.py => base_memory.py} (100%)

diff --git a/libs/langchain/langchain_classic/agents/agent_toolkits/conversational_retrieval/openai_functions.py b/libs/langchain/langchain_classic/agents/agent_toolkits/conversational_retrieval/openai_functions.py
index 9d6f0f1c2e43d..5b8ddfc6e1ac8 100644
--- a/libs/langchain/langchain_classic/agents/agent_toolkits/conversational_retrieval/openai_functions.py
+++ b/libs/langchain/langchain_classic/agents/agent_toolkits/conversational_retrieval/openai_functions.py
@@ -10,7 +10,7 @@
     AgentTokenBufferMemory,
 )
 from langchain_classic.agents.openai_functions_agent.base import OpenAIFunctionsAgent
-from langchain_classic.memory.base import BaseMemory
+from langchain_classic.base_memory import BaseMemory
 from langchain_classic.memory.token_buffer import ConversationTokenBufferMemory
diff --git a/libs/langchain/langchain_classic/memory/base.py b/libs/langchain/langchain_classic/base_memory.py
similarity index 100%
rename from libs/langchain/langchain_classic/memory/base.py
rename to libs/langchain/langchain_classic/base_memory.py
diff --git a/libs/langchain/langchain_classic/chains/base.py b/libs/langchain/langchain_classic/chains/base.py
index c7b63ce320991..df2d4b4c3eb1e 100644
--- a/libs/langchain/langchain_classic/chains/base.py
+++ b/libs/langchain/langchain_classic/chains/base.py
@@ -37,7 +37,7 @@
 )
 from typing_extensions import override
 
-from langchain_classic.memory.base import BaseMemory
+from langchain_classic.base_memory import BaseMemory
 from langchain_classic.schema import RUN_KEY
 
 logger = logging.getLogger(__name__)
diff --git a/libs/langchain/langchain_classic/chains/conversation/base.py b/libs/langchain/langchain_classic/chains/conversation/base.py
index 26b2728d32743..98bc9a4f121d0 100644
--- a/libs/langchain/langchain_classic/chains/conversation/base.py
+++ b/libs/langchain/langchain_classic/chains/conversation/base.py
@@ -5,9 +5,9 @@
 from pydantic import ConfigDict, Field, model_validator
 from typing_extensions import Self, override
 
+from langchain_classic.base_memory import BaseMemory
 from langchain_classic.chains.conversation.prompt import PROMPT
 from langchain_classic.chains.llm import LLMChain
-from langchain_classic.memory.base import BaseMemory
 from langchain_classic.memory.buffer import ConversationBufferMemory
diff --git a/libs/langchain/langchain_classic/memory/buffer.py b/libs/langchain/langchain_classic/memory/buffer.py
index a328458854d74..5a08db95c74f6 100644
--- a/libs/langchain/langchain_classic/memory/buffer.py
+++ b/libs/langchain/langchain_classic/memory/buffer.py
@@ -5,7 +5,7 @@
 from langchain_core.utils import pre_init
 from typing_extensions import override
 
-from langchain_classic.memory.base import BaseMemory
+from langchain_classic.base_memory import BaseMemory
 from langchain_classic.memory.chat_memory import BaseChatMemory
 from langchain_classic.memory.utils import get_prompt_input_key
diff --git a/libs/langchain/langchain_classic/memory/chat_memory.py b/libs/langchain/langchain_classic/memory/chat_memory.py
index d624a42997366..c775c6ba31700 100644
--- a/libs/langchain/langchain_classic/memory/chat_memory.py
+++ b/libs/langchain/langchain_classic/memory/chat_memory.py
@@ -10,7 +10,7 @@
 from langchain_core.messages import AIMessage, HumanMessage
 from pydantic import Field
 
-from langchain_classic.memory.base import BaseMemory
+from langchain_classic.base_memory import BaseMemory
 from langchain_classic.memory.utils import get_prompt_input_key
diff --git a/libs/langchain/langchain_classic/memory/combined.py b/libs/langchain/langchain_classic/memory/combined.py
index be60214d1005d..3a5781ce01a4c 100644
--- a/libs/langchain/langchain_classic/memory/combined.py
+++ b/libs/langchain/langchain_classic/memory/combined.py
@@ -3,7 +3,7 @@
 
 from pydantic import field_validator
 
-from langchain_classic.memory.base import BaseMemory
+from langchain_classic.base_memory import BaseMemory
 from langchain_classic.memory.chat_memory import BaseChatMemory
diff --git a/libs/langchain/langchain_classic/memory/readonly.py b/libs/langchain/langchain_classic/memory/readonly.py
index 450ddeca6e02d..85ac1c626e154 100644
--- a/libs/langchain/langchain_classic/memory/readonly.py
+++ b/libs/langchain/langchain_classic/memory/readonly.py
@@ -1,6 +1,6 @@
 from typing import Any
 
-from langchain_classic.memory.base import BaseMemory
+from langchain_classic.base_memory import BaseMemory
 
 
 class ReadOnlySharedMemory(BaseMemory):
diff --git a/libs/langchain/langchain_classic/memory/simple.py b/libs/langchain/langchain_classic/memory/simple.py
index 23dad908d35ef..c9163c396f375 100644
--- a/libs/langchain/langchain_classic/memory/simple.py
+++ b/libs/langchain/langchain_classic/memory/simple.py
@@ -2,7 +2,7 @@
 
 from typing_extensions import override
 
-from langchain_classic.memory.base import BaseMemory
+from langchain_classic.base_memory import BaseMemory
 
 
 class SimpleMemory(BaseMemory):
diff --git a/libs/langchain/langchain_classic/memory/vectorstore.py b/libs/langchain/langchain_classic/memory/vectorstore.py
index 635bf4d4a94db..3f6eef1916e51 100644
--- a/libs/langchain/langchain_classic/memory/vectorstore.py
+++ b/libs/langchain/langchain_classic/memory/vectorstore.py
@@ -8,7 +8,7 @@
 from langchain_core.vectorstores import VectorStoreRetriever
 from pydantic import Field
 
-from langchain_classic.memory.base import BaseMemory
+from langchain_classic.base_memory import BaseMemory
 from langchain_classic.memory.utils import get_prompt_input_key
diff --git a/libs/langchain/langchain_classic/schema/__init__.py b/libs/langchain/langchain_classic/schema/__init__.py
index d3e06e9496e34..c957fe8b9a528 100644
--- a/libs/langchain/langchain_classic/schema/__init__.py
+++ b/libs/langchain/langchain_classic/schema/__init__.py
@@ -35,7 +35,7 @@
 from langchain_core.retrievers import BaseRetriever
 from langchain_core.stores import BaseStore
 
-from langchain_classic.memory.base import BaseMemory
+from langchain_classic.base_memory import BaseMemory
 
 RUN_KEY = "__run"
diff --git a/libs/langchain/langchain_classic/schema/memory.py b/libs/langchain/langchain_classic/schema/memory.py
index 1012628300ea1..238d3283936a7 100644
--- a/libs/langchain/langchain_classic/schema/memory.py
+++ b/libs/langchain/langchain_classic/schema/memory.py
@@ -1,3 +1,3 @@
-from langchain_classic.memory.base import BaseMemory
+from langchain_classic.base_memory import BaseMemory
 
 __all__ = ["BaseMemory"]
diff --git a/libs/langchain/tests/unit_tests/chains/test_base.py b/libs/langchain/tests/unit_tests/chains/test_base.py
index 7ed51d17b9cb1..a607eada5b901 100644
--- a/libs/langchain/tests/unit_tests/chains/test_base.py
+++ b/libs/langchain/tests/unit_tests/chains/test_base.py
@@ -9,8 +9,8 @@
 from langchain_core.tracers.context import collect_runs
 from typing_extensions import override
 
+from langchain_classic.base_memory import BaseMemory
 from langchain_classic.chains.base import Chain
-from langchain_classic.memory.base import BaseMemory
 from langchain_classic.schema import RUN_KEY
 from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
diff --git a/libs/langchain/tests/unit_tests/chains/test_conversation.py b/libs/langchain/tests/unit_tests/chains/test_conversation.py
index 45d19744a3a94..7ff07f45da6e3 100644
--- a/libs/langchain/tests/unit_tests/chains/test_conversation.py
+++ b/libs/langchain/tests/unit_tests/chains/test_conversation.py
@@ -9,8 +9,8 @@
 from langchain_core.prompts.prompt import PromptTemplate
 from typing_extensions import override
 
+from langchain_classic.base_memory import BaseMemory
 from langchain_classic.chains.conversation.base import ConversationChain
-from langchain_classic.memory.base import BaseMemory
 from langchain_classic.memory.buffer import ConversationBufferMemory
 from langchain_classic.memory.buffer_window import ConversationBufferWindowMemory
 from langchain_classic.memory.summary import ConversationSummaryMemory
diff --git a/libs/langchain/tests/unit_tests/chains/test_memory.py b/libs/langchain/tests/unit_tests/chains/test_memory.py
index 0471c08567ab7..0894d5f92dfca 100644
--- a/libs/langchain/tests/unit_tests/chains/test_memory.py
+++ b/libs/langchain/tests/unit_tests/chains/test_memory.py
@@ -1,12 +1,12 @@
 import pytest
 
+from langchain_classic.base_memory import BaseMemory
 from langchain_classic.chains.conversation.memory import (
     ConversationBufferMemory,
     ConversationBufferWindowMemory,
     ConversationSummaryMemory,
 )
 from langchain_classic.memory import ReadOnlySharedMemory, SimpleMemory
-from langchain_classic.memory.base import BaseMemory
 from tests.unit_tests.llms.fake_llm import FakeLLM