Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Agent history message #2234

Closed
wants to merge 4 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion .env.template
Original file line number Diff line number Diff line change
Expand Up @@ -333,4 +333,9 @@ DBGPT_LOG_LEVEL=INFO
# FIN_REPORT_MODEL=/app/models/bge-large-zh

## Turn off the notebook display Python flow, which is enabled by default
NOTE_BOOK_ENABLE=False
NOTE_BOOK_ENABLE=False

## The agent historical message retention configuration defaults to the last two rounds.
# MESSAGES_KEEP_START_ROUNDS=0
# MESSAGES_KEEP_END_ROUNDS=2

7 changes: 7 additions & 0 deletions dbgpt/_private/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -361,6 +361,13 @@ def __init__(self) -> None:
)
self.NOTE_BOOK_ROOT: str = os.getenv("NOTE_BOOK_ROOT", os.path.expanduser("~"))

self.MESSAGES_KEEP_START_ROUNDS: int = int(
os.getenv("MESSAGES_KEEP_START_ROUNDS", 0)
)
self.MESSAGES_KEEP_END_ROUNDS: int = int(
os.getenv("MESSAGES_KEEP_END_ROUNDS", 2)
)

@property
def local_db_manager(self) -> "ConnectorManager":
from dbgpt.datasource.manages import ConnectorManager
Expand Down
5 changes: 5 additions & 0 deletions dbgpt/agent/core/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ async def send(
silent: Optional[bool] = False,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
rely_messages: Optional[List[AgentMessage]] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
) -> None:
"""Send a message to recipient agent.

Expand All @@ -52,6 +54,8 @@ async def receive(
is_recovery: Optional[bool] = False,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
rely_messages: Optional[List[AgentMessage]] = None,
) -> None:
"""Receive a message from another agent.

Expand All @@ -74,6 +78,7 @@ async def generate_reply(
sender: Agent,
reviewer: Optional[Agent] = None,
rely_messages: Optional[List[AgentMessage]] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
**kwargs,
Expand Down
87 changes: 77 additions & 10 deletions dbgpt/agent/core/base_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,8 @@ async def send(
silent: Optional[bool] = False,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
rely_messages: Optional[List[AgentMessage]] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
) -> None:
"""Send a message to recipient agent."""
with root_tracer.start_span(
Expand All @@ -232,6 +234,8 @@ async def send(
silent=silent,
is_retry_chat=is_retry_chat,
last_speaker_name=last_speaker_name,
historical_dialogues=historical_dialogues,
rely_messages=rely_messages,
)

async def receive(
Expand All @@ -244,6 +248,8 @@ async def receive(
is_recovery: Optional[bool] = False,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
rely_messages: Optional[List[AgentMessage]] = None,
) -> None:
"""Receive a message from another agent."""
with root_tracer.start_span(
Expand Down Expand Up @@ -272,13 +278,17 @@ async def receive(
reviewer=reviewer,
is_retry_chat=is_retry_chat,
last_speaker_name=last_speaker_name,
historical_dialogues=historical_dialogues,
rely_messages=rely_messages,
)
else:
reply = await self.generate_reply(
received_message=message,
sender=sender,
reviewer=reviewer,
is_retry_chat=is_retry_chat,
historical_dialogues=historical_dialogues,
rely_messages=rely_messages,
)

if reply is not None:
Expand All @@ -289,6 +299,7 @@ def prepare_act_param(
received_message: Optional[AgentMessage],
sender: Agent,
rely_messages: Optional[List[AgentMessage]] = None,
**kwargs,
) -> Dict[str, Any]:
"""Prepare the parameters for the act method."""
return {}
Expand All @@ -300,6 +311,7 @@ async def generate_reply(
sender: Agent,
reviewer: Optional[Agent] = None,
rely_messages: Optional[List[AgentMessage]] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
**kwargs,
Expand Down Expand Up @@ -361,9 +373,10 @@ async def generate_reply(
f"Depends on the number of historical messages:{len(rely_messages) if rely_messages else 0}!" # noqa
)
thinking_messages, resource_info = await self._load_thinking_messages(
received_message,
sender,
rely_messages,
received_message=received_message,
sender=sender,
rely_messages=rely_messages,
historical_dialogues=historical_dialogues,
context=reply_message.get_dict_context(),
is_retry_chat=is_retry_chat,
)
Expand Down Expand Up @@ -400,7 +413,10 @@ async def generate_reply(
span.metadata["comments"] = comments

act_extent_param = self.prepare_act_param(
received_message, sender, rely_messages
received_message=received_message,
sender=sender,
rely_messages=rely_messages,
historical_dialogues=historical_dialogues,
)
with root_tracer.start_span(
"agent.generate_reply.act",
Expand Down Expand Up @@ -620,6 +636,8 @@ async def initiate_chat(
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
message_rounds: int = 0,
historical_dialogues: Optional[List[AgentMessage]] = None,
rely_messages: Optional[List[AgentMessage]] = None,
**context,
):
"""Initiate a chat with another agent.
Expand Down Expand Up @@ -652,6 +670,8 @@ async def initiate_chat(
agent_message,
recipient,
reviewer,
historical_dialogues=historical_dialogues,
rely_messages=rely_messages,
request_reply=request_reply,
is_retry_chat=is_retry_chat,
last_speaker_name=last_speaker_name,
Expand Down Expand Up @@ -825,6 +845,38 @@ def _excluded_models(

return can_uses

def convert_to_agent_message(
    self,
    gpts_messages: List[GptsMessage],
    is_rery_chat: bool = False,
) -> Optional[List[AgentMessage]]:
    """Convert persisted ``GptsMessage`` records into ``AgentMessage`` objects.

    Relative to the current agent, every message received is treated as a
    user message and every message sent as an assistant message.

    Args:
        gpts_messages: Persisted conversation messages to convert.
        is_rery_chat: Unused flag. NOTE(review): the name looks like a typo of
            ``is_retry_chat``; kept unchanged for caller compatibility.

    Returns:
        The converted messages, or ``None`` when there is nothing to convert.
    """
    if not gpts_messages:
        return None

    def _to_agent_message(record: GptsMessage) -> AgentMessage:
        # Deserialize the optional JSON payloads only when they are present.
        parsed_context = (
            json.loads(record.context) if record.context is not None else None
        )
        report = (
            ActionOutput.from_dict(json.loads(record.action_report))
            if record.action_report
            else None
        )
        return AgentMessage(
            content=record.content,
            context=parsed_context,
            action_report=report,
            name=record.sender,
            rounds=record.rounds,
            model_name=record.model_name,
            success=record.is_success,
        )

    return [_to_agent_message(record) for record in gpts_messages]

async def _a_select_llm_model(
self, excluded_models: Optional[List[str]] = None
) -> str:
Expand Down Expand Up @@ -959,6 +1011,7 @@ async def _load_thinking_messages(
received_message: AgentMessage,
sender: Agent,
rely_messages: Optional[List[AgentMessage]] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
context: Optional[Dict[str, Any]] = None,
is_retry_chat: bool = False,
) -> Tuple[List[AgentMessage], Optional[Dict]]:
Expand Down Expand Up @@ -1020,13 +1073,27 @@ async def _load_thinking_messages(
role=ModelMessageRoleType.SYSTEM,
)
)
if user_prompt:
agent_messages.append(
AgentMessage(
content=user_prompt,
role=ModelMessageRoleType.HUMAN,
)
# Historical messages from the associated context
if historical_dialogues:
for i in range(len(historical_dialogues)):
if i % 2 == 0:
# Starting from index 0: even indices are user messages
message = historical_dialogues[i]
message.role = ModelMessageRoleType.HUMAN
agent_messages.append(message)
else:
# Odd indices are AI messages
message = historical_dialogues[i]
message.role = ModelMessageRoleType.AI
agent_messages.append(message)

# The current user input message
agent_messages.append(
AgentMessage(
content=user_prompt,
role=ModelMessageRoleType.HUMAN,
)
)

return agent_messages, resource_references

Expand Down
3 changes: 2 additions & 1 deletion dbgpt/agent/core/base_team.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,8 +161,9 @@ async def _load_thinking_messages(
received_message: AgentMessage,
sender: Agent,
rely_messages: Optional[List[AgentMessage]] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
context: Optional[Dict[str, Any]] = None,
is_retry_chat: Optional[bool] = False,
is_retry_chat: bool = False,
) -> Tuple[List[AgentMessage], Optional[Dict]]:
"""Load messages for thinking."""
return [AgentMessage(content=received_message.content)], None
2 changes: 1 addition & 1 deletion dbgpt/agent/core/memory/gpts/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ class GptsMessage:
receiver: str
role: str
content: str
rounds: Optional[int]
rounds: int = 0
is_success: bool = True
app_code: Optional[str] = None
app_name: Optional[str] = None
Expand Down
7 changes: 5 additions & 2 deletions dbgpt/agent/core/memory/gpts/gpts_memory.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,8 +127,11 @@ async def append_message(self, conv_id: str, message: GptsMessage):
await self.push_message(conv_id)

async def get_messages(self, conv_id: str) -> List["GptsMessage"]:
    """Return all messages of the conversation ``conv_id``.

    Reads the in-memory cache first and falls back to the persistent
    message memory when the cache holds no entries for this conversation
    (e.g. after a process restart).

    Args:
        conv_id: Identifier of the conversation to load.

    Returns:
        The cached messages, or the messages loaded from durable storage
        when the cache entry is empty.
    """
    messages = self.messages_cache[conv_id]
    if not messages:
        # Cache miss: load from the durable store.
        # NOTE(review): the loaded result is not written back into the
        # cache, so every miss hits storage again — confirm this is intended.
        messages = self.message_memory.get_by_conv_id(conv_id)
    return messages

async def get_agent_messages(
self, conv_id: str, agent_role: str
Expand Down
4 changes: 3 additions & 1 deletion dbgpt/agent/core/plan/awel/team_awel_layout.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

import logging
from abc import ABC, abstractmethod
from typing import Optional, cast
from typing import List, Optional, cast

from dbgpt._private.config import Config
from dbgpt._private.pydantic import (
Expand Down Expand Up @@ -114,6 +114,8 @@ async def receive(
is_recovery: Optional[bool] = False,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
rely_messages: Optional[List[AgentMessage]] = None,
) -> None:
"""Receive message by base team."""
if request_reply is False or request_reply is None:
Expand Down
1 change: 1 addition & 0 deletions dbgpt/agent/core/plan/planner_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -169,6 +169,7 @@ def prepare_act_param(
received_message: Optional[AgentMessage],
sender: Agent,
rely_messages: Optional[List[AgentMessage]] = None,
**kwargs,
) -> Dict[str, Any]:
"""Prepare the parameters for the act method."""
return {
Expand Down
4 changes: 3 additions & 1 deletion dbgpt/agent/core/user_proxy_agent.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
"""A proxy agent for the user."""
from typing import Optional
from typing import List, Optional

from .. import ActionOutput, Agent, AgentMessage
from .base_agent import ConversableAgent
Expand Down Expand Up @@ -39,6 +39,8 @@ async def receive(
is_recovery: Optional[bool] = False,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
rely_messages: Optional[List[AgentMessage]] = None,
) -> None:
"""Receive a message from another agent."""
if not silent:
Expand Down
43 changes: 42 additions & 1 deletion dbgpt/serve/agent/agents/controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -206,11 +206,44 @@ async def agent_chat_v2(
if not gpt_app:
raise ValueError(f"Not found app {gpts_name}!")

historical_dialogues: List[GptsMessage] = []
if not is_retry_chat:
# Create a new gpts conversation record
gpt_app: GptsApp = self.gpts_app.app_detail(gpts_name)
if not gpt_app:
raise ValueError(f"Not found app {gpts_name}!")

## When creating a new gpts conversation record, decide — based on the application definition — whether to include the history of previous topics.
## TODO BEGIN
# Temporarily use system configuration management, and subsequently use application configuration management
if CFG.MESSAGES_KEEP_START_ROUNDS and CFG.MESSAGES_KEEP_START_ROUNDS > 0:
gpt_app.keep_start_rounds = CFG.MESSAGES_KEEP_START_ROUNDS
if CFG.MESSAGES_KEEP_END_ROUNDS and CFG.MESSAGES_KEEP_END_ROUNDS > 0:
gpt_app.keep_end_rounds = CFG.MESSAGES_KEEP_END_ROUNDS
## TODO END

if gpt_app.keep_start_rounds > 0 or gpt_app.keep_end_rounds > 0:
if gpts_conversations and len(gpts_conversations) > 0:
rely_conversations = []
if gpt_app.keep_start_rounds + gpt_app.keep_end_rounds < len(
gpts_conversations
):
if gpt_app.keep_start_rounds > 0:
front = gpts_conversations[gpt_app.keep_start_rounds :]
rely_conversations.extend(front)
if gpt_app.keep_end_rounds > 0:
back = gpts_conversations[-gpt_app.keep_end_rounds :]
rely_conversations.extend(back)
else:
rely_conversations = gpts_conversations
for gpts_conversation in rely_conversations:
temps: List[GptsMessage] = await self.memory.get_messages(
gpts_conversation.conv_id
)
if temps and len(temps) > 1:
historical_dialogues.append(temps[0])
historical_dialogues.append(temps[-1])

self.gpts_conversations.add(
GptsConversationsEntity(
conv_id=agent_conv_id,
Expand Down Expand Up @@ -277,6 +310,8 @@ async def agent_chat_v2(
is_retry_chat,
last_speaker_name=last_speaker_name,
init_message_rounds=message_round,
enable_verbose=enable_verbose,
historical_dialogues=historical_dialogues,
**ext_info,
)
)
Expand Down Expand Up @@ -418,6 +453,8 @@ async def agent_team_chat_new(
link_sender: ConversableAgent = None,
app_link_start: bool = False,
enable_verbose: bool = True,
historical_dialogues: Optional[List[GptsMessage]] = None,
rely_messages: Optional[List[GptsMessage]] = None,
**ext_info,
):
gpts_status = Status.COMPLETE.value
Expand Down Expand Up @@ -529,6 +566,10 @@ async def agent_team_chat_new(
is_retry_chat=is_retry_chat,
last_speaker_name=last_speaker_name,
message_rounds=init_message_rounds,
historical_dialogues=user_proxy.convert_to_agent_message(
historical_dialogues
),
rely_messages=rely_messages,
**ext_info,
)

Expand Down
Loading
Loading