Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Gpts app v0.4 #1170

Merged
merged 4 commits into from
Feb 20, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion assets/schema/dbgpt.sql
Original file line number Diff line number Diff line change
Expand Up @@ -197,6 +197,8 @@ CREATE TABLE IF NOT EXISTS `prompt_manage`
`sys_code` varchar(255) DEFAULT NULL COMMENT 'system app ',
`created_at` datetime DEFAULT NULL COMMENT 'create time',
`updated_at` datetime DEFAULT NULL COMMENT 'last update time',
`team_mode` varchar(255) NULL COMMENT 'agent team work mode',

PRIMARY KEY (`id`),
UNIQUE KEY `uk_gpts_conversations` (`conv_id`),
KEY `idx_gpts_name` (`gpts_name`)
Expand Down Expand Up @@ -230,7 +232,7 @@ CREATE TABLE `gpts_messages` (
`model_name` varchar(255) DEFAULT NULL COMMENT 'message generate model',
`rounds` int(11) NOT NULL COMMENT 'dialogue turns',
`content` text COMMENT 'Content of the speech',
`current_gogal` text COMMENT 'The target corresponding to the current message',
`current_goal` text COMMENT 'The target corresponding to the current message',
`context` text COMMENT 'Current conversation context',
`review_info` text COMMENT 'Current conversation review info',
`action_report` text COMMENT 'Current conversation action report',
Expand Down
28 changes: 14 additions & 14 deletions dbgpt/agent/agents/base_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -220,7 +220,7 @@ def append_message(self, message: Optional[Dict], role, sender: Agent) -> bool:
"context",
"action_report",
"review_info",
"current_gogal",
"current_goal",
"model_name",
)
if k in message
Expand All @@ -246,7 +246,7 @@ def append_message(self, message: Optional[Dict], role, sender: Agent) -> bool:
receiver=self.name,
role=role,
rounds=self.consecutive_auto_reply_counter,
current_gogal=oai_message.get("current_gogal", None),
current_goal=oai_message.get("current_goal", None),
content=oai_message.get("content", None),
context=json.dumps(oai_message["context"], ensure_ascii=False)
if "context" in oai_message
Expand Down Expand Up @@ -458,16 +458,16 @@ def process_now_message(
sender,
rely_messages: Optional[List[Dict]] = None,
):
current_gogal = current_message.get("current_gogal", None)
current_goal = current_message.get("current_goal", None)
### Convert and tailor the information in collective memory into contextual memory available to the current Agent
current_gogal_messages = self._gpts_message_to_ai_message(
current_goal_messages = self._gpts_message_to_ai_message(
self.memory.message_memory.get_between_agents(
self.agent_context.conv_id, self.name, sender.name, current_gogal
self.agent_context.conv_id, self.name, sender.name, current_goal
)
)
if current_gogal_messages is None or len(current_gogal_messages) <= 0:
if current_goal_messages is None or len(current_goal_messages) <= 0:
current_message["role"] = ModelMessageRoleType.HUMAN
current_gogal_messages = [current_message]
current_goal_messages = [current_message]
### relay messages
cut_messages = []
if rely_messages:
Expand All @@ -479,13 +479,13 @@ def process_now_message(
else:
cut_messages.extend(self._rely_messages)

if len(current_gogal_messages) < self.dialogue_memory_rounds:
cut_messages.extend(current_gogal_messages)
if len(current_goal_messages) < self.dialogue_memory_rounds:
cut_messages.extend(current_goal_messages)
else:
# TODO: allocate historical information based on token budget
cut_messages.extend(current_gogal_messages[:2])
cut_messages.extend(current_goal_messages[:2])
# end_round = self.dialogue_memory_rounds - 2
cut_messages.extend(current_gogal_messages[-3:])
cut_messages.extend(current_goal_messages[-3:])
return cut_messages

async def a_system_fill_param(self):
Expand All @@ -502,7 +502,7 @@ async def a_generate_reply(
## 0.New message build
new_message = {}
new_message["context"] = message.get("context", None)
new_message["current_gogal"] = message.get("current_gogal", None)
new_message["current_goal"] = message.get("current_goal", None)

## 1. LLM Reasoning
await self.a_system_fill_param()
Expand Down Expand Up @@ -576,7 +576,7 @@ async def a_verify_reply(
## Send error messages to yourself for retrieval optimization and increase the number of retrievals
retry_message = {}
retry_message["context"] = message.get("context", None)
retry_message["current_gogal"] = message.get("current_gogal", None)
retry_message["current_goal"] = message.get("current_goal", None)
retry_message["model_name"] = message.get("model_name", None)
retry_message["content"] = fail_reason
## Use the original sender to send the retry message to yourself
Expand All @@ -603,7 +603,7 @@ async def a_retry_chat(
"context": json.loads(last_message.context)
if last_message.context
else None,
"current_gogal": last_message.current_gogal,
"current_goal": last_message.current_goal,
"review_info": json.loads(last_message.review_info)
if last_message.review_info
else None,
Expand Down
26 changes: 13 additions & 13 deletions dbgpt/agent/agents/base_agent_new.py
Original file line number Diff line number Diff line change
Expand Up @@ -323,7 +323,7 @@ async def a_initiate_chat(
await self.a_send(
{
"content": context["message"],
"current_gogal": context["message"],
"current_goal": context["message"],
},
recipient,
reviewer,
Expand Down Expand Up @@ -352,7 +352,7 @@ async def _a_append_message(
"context",
"action_report",
"review_info",
"current_gogal",
"current_goal",
"model_name",
)
if k in message
Expand All @@ -364,7 +364,7 @@ async def _a_append_message(
receiver=self.profile,
role=role,
rounds=self.consecutive_auto_reply_counter,
current_gogal=oai_message.get("current_gogal", None),
current_goal=oai_message.get("current_goal", None),
content=oai_message.get("content", None),
context=json.dumps(oai_message["context"], ensure_ascii=False)
if "context" in oai_message
Expand Down Expand Up @@ -501,7 +501,7 @@ def _init_reply_message(self, recive_message):
"""
new_message = {}
new_message["context"] = recive_message.get("context", None)
new_message["current_gogal"] = recive_message.get("current_gogal", None)
new_message["current_goal"] = recive_message.get("current_goal", None)
return new_message

def _convert_to_ai_message(
Expand Down Expand Up @@ -544,19 +544,19 @@ def _load_thinking_messages(
sender,
rely_messages: Optional[List[Dict]] = None,
) -> Optional[List[Dict]]:
current_gogal = receive_message.get("current_gogal", None)
current_goal = receive_message.get("current_goal", None)

### Convert and tailor the information in collective memory into contextual memory available to the current Agent
current_gogal_messages = self._convert_to_ai_message(
current_goal_messages = self._convert_to_ai_message(
self.memory.message_memory.get_between_agents(
self.agent_context.conv_id, self.profile, sender.profile, current_gogal
self.agent_context.conv_id, self.profile, sender.profile, current_goal
)
)

# When there is no target and context, the current received message is used as the target problem
if current_gogal_messages is None or len(current_gogal_messages) <= 0:
if current_goal_messages is None or len(current_goal_messages) <= 0:
receive_message["role"] = ModelMessageRoleType.HUMAN
current_gogal_messages = [receive_message]
current_goal_messages = [receive_message]

### relay messages
cut_messages = []
Expand All @@ -572,14 +572,14 @@ def _load_thinking_messages(
cut_messages.extend(rely_messages)

# TODO: allocate historical information based on token budget
if len(current_gogal_messages) < 5:
cut_messages.extend(current_gogal_messages)
if len(current_goal_messages) < 5:
cut_messages.extend(current_goal_messages)
else:
# For the time being, the smallest size of historical message records will be used by default.
# Use the first two rounds of messages to understand the initial goals
cut_messages.extend(current_gogal_messages[:2])
cut_messages.extend(current_goal_messages[:2])
# Use information from the last three rounds of communication to ensure that current thinking knows what happened and what to do in the last communication
cut_messages.extend(current_gogal_messages[-3:])
cut_messages.extend(current_goal_messages[-3:])
return cut_messages

def _new_system_message(self, content):
Expand Down
2 changes: 1 addition & 1 deletion dbgpt/agent/agents/expand/code_assistant_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ def __init__(self, **kwargs):
self._init_actions([CodeAction])

async def a_correctness_check(self, message: Optional[Dict]):
task_gogal = message.get("current_gogal", None)
task_gogal = message.get("current_goal", None)
action_report = message.get("action_report", None)
task_result = ""
if action_report:
Expand Down
6 changes: 3 additions & 3 deletions dbgpt/agent/agents/expand/retrieve_summary_assistant_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -196,7 +196,7 @@ async def a_generate_reply(
## New message build
new_message = {}
new_message["context"] = current_messages[-1].get("context", None)
new_message["current_gogal"] = current_messages[-1].get("current_gogal", None)
new_message["current_goal"] = current_messages[-1].get("current_goal", None)
new_message["role"] = "assistant"
new_message["content"] = user_question
new_message["model_name"] = model
Expand All @@ -206,7 +206,7 @@ async def a_generate_reply(
## Summary message build
summary_message = {}
summary_message["context"] = message.get("context", None)
summary_message["current_gogal"] = message.get("current_gogal", None)
summary_message["current_goal"] = message.get("current_goal", None)

summaries = ""
count = 0
Expand Down Expand Up @@ -262,7 +262,7 @@ async def a_generate_reply(

async def a_verify(self, message: Optional[Dict]):
self.update_system_message(self.CHECK_RESULT_SYSTEM_MESSAGE)
current_goal = message.get("current_gogal", None)
current_goal = message.get("current_goal", None)
action_report = message.get("action_report", None)
task_result = ""
if action_report:
Expand Down
2 changes: 1 addition & 1 deletion dbgpt/agent/agents/expand/summary_assistant_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ def __init__(self, **kwargs):
self._init_actions([BlankAction])

# async def a_correctness_check(self, message: Optional[Dict]):
# current_goal = message.get("current_gogal", None)
# current_goal = message.get("current_goal", None)
# action_report = message.get("action_report", None)
# task_result = ""
# if action_report:
Expand Down
4 changes: 2 additions & 2 deletions dbgpt/agent/common/schema.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ class GptsMessage:
role: str
content: str
rounds: Optional[int]
current_gogal: str = None
current_goal: str = None
context: Optional[str] = None
review_info: Optional[str] = None
action_report: Optional[str] = None
Expand All @@ -61,7 +61,7 @@ def from_dict(d: Dict[str, Any]) -> GptsMessage:
content=d["content"],
rounds=d["rounds"],
model_name=d["model_name"],
current_gogal=d["current_gogal"],
current_goal=d["current_goal"],
context=d["context"],
review_info=d["review_info"],
action_report=d["action_report"],
Expand Down
6 changes: 3 additions & 3 deletions dbgpt/agent/memory/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ class GptsMessage:
role: str
content: str
rounds: Optional[int]
current_gogal: str = None
current_goal: str = None
context: Optional[str] = None
review_info: Optional[str] = None
action_report: Optional[str] = None
Expand All @@ -75,7 +75,7 @@ def from_dict(d: Dict[str, Any]) -> GptsMessage:
content=d["content"],
rounds=d["rounds"],
model_name=d["model_name"],
current_gogal=d["current_gogal"],
current_goal=d["current_goal"],
context=d["context"],
review_info=d["review_info"],
action_report=d["action_report"],
Expand Down Expand Up @@ -203,7 +203,7 @@ def get_between_agents(
conv_id: str,
agent1: str,
agent2: str,
current_gogal: Optional[str] = None,
current_goal: Optional[str] = None,
) -> Optional[List[GptsMessage]]:
"""
Query information related to an agent
Expand Down
6 changes: 3 additions & 3 deletions dbgpt/agent/memory/default_gpts_memory.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,11 +100,11 @@ def get_between_agents(
conv_id: str,
agent1: str,
agent2: str,
current_gogal: Optional[str] = None,
current_goal: Optional[str] = None,
) -> Optional[List[GptsMessage]]:
if current_gogal:
if current_goal:
result = self.df.query(
f"conv_id==@conv_id and ((sender==@agent1 and receiver==@agent2) or (sender==@agent2 and receiver==@agent1)) and current_gogal==@current_gogal"
f"conv_id==@conv_id and ((sender==@agent1 and receiver==@agent2) or (sender==@agent2 and receiver==@agent1)) and current_goal==@current_goal"
)
else:
result = self.df.query(
Expand Down
4 changes: 2 additions & 2 deletions dbgpt/agent/memory/gpts_memory.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ async def one_chat_competions(self, conv_id: str):
count = count + 1
if count == 1:
continue
if not message.current_gogal or len(message.current_gogal) <= 0:
if not message.current_goal or len(message.current_goal) <= 0:
if len(temp_group) > 0:
vis_items.append(await self._plan_vis_build(temp_group))
temp_group.clear()
Expand All @@ -69,7 +69,7 @@ async def one_chat_competions(self, conv_id: str):
vis_items.append(await self._messages_to_agents_vis(temp_messages))
temp_messages.clear()

last_gogal = message.current_gogal
last_gogal = message.current_goal
temp_group[last_gogal].append(message)

if len(temp_group) > 0:
Expand Down
12 changes: 6 additions & 6 deletions dbgpt/agent/memory/gpts_memory_storage.py
Original file line number Diff line number Diff line change
Expand Up @@ -184,7 +184,7 @@ class GptsMessageStorage(StorageItem):
role: str
content: str
rounds: Optional[int]
current_gogal: str = None
current_goal: str = None
context: Optional[str] = None
review_info: Optional[str] = None
action_report: Optional[str] = None
Expand All @@ -204,7 +204,7 @@ def from_dict(d: Dict[str, Any]):
content=d["content"],
rounds=d["rounds"],
model_name=d["model_name"],
current_gogal=d["current_gogal"],
current_goal=d["current_goal"],
context=d["context"],
review_info=d["review_info"],
action_report=d["action_report"],
Expand Down Expand Up @@ -239,7 +239,7 @@ def to_gpts_message(self) -> GptsMessage:
role=self.role,
content=self.content,
rounds=self.rounds,
current_gogal=self.current_gogal,
current_goal=self.current_goal,
context=self.context,
review_info=self.review_info,
action_report=self.action_report,
Expand All @@ -258,7 +258,7 @@ def from_gpts_message(gpts_message: GptsMessage) -> "StoragePromptTemplate":
role=gpts_message.role,
content=gpts_message.content,
rounds=gpts_message.rounds,
current_gogal=gpts_message.current_gogal,
current_goal=gpts_message.current_goal,
context=gpts_message.context,
review_info=gpts_message.review_info,
action_report=gpts_message.action_report,
Expand Down Expand Up @@ -344,9 +344,9 @@ def get_between_agents(
conv_id: str,
agent1: str,
agent2: str,
current_gogal: Optional[str] = None,
current_goal: Optional[str] = None,
) -> Optional[List[GptsMessage]]:
return super().get_between_agents(conv_id, agent1, agent2, current_gogal)
return super().get_between_agents(conv_id, agent1, agent2, current_goal)

    def get_by_conv_id(self, conv_id: str) -> Optional[List[GptsMessage]]:
        """Fetch all stored messages for the given conversation id.

        Pure delegation to the parent class implementation; kept as an
        explicit override so the storage-backed memory exposes the same
        public query surface as its siblings (e.g. ``get_between_agents``).
        """
        return super().get_by_conv_id(conv_id)
Expand Down
2 changes: 1 addition & 1 deletion dbgpt/app/scene/chat_agent/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def __init__(self, chat_param: Dict):

# load select plugin
agent_module = CFG.SYSTEM_APP.get_component(
ComponentType.AGENT_HUB, ModulePlugin
ComponentType.PLUGIN_HUB, ModulePlugin
)
self.plugins_prompt_generator = agent_module.load_select_plugin(
self.plugins_prompt_generator, self.select_plugins
Expand Down
4 changes: 2 additions & 2 deletions dbgpt/serve/agent/agents/db_gpts_memory.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,10 +94,10 @@ def get_between_agents(
conv_id: str,
agent1: str,
agent2: str,
current_gogal: Optional[str] = None,
current_goal: Optional[str] = None,
) -> Optional[List[GptsMessage]]:
db_results = self.gpts_message.get_between_agents(
conv_id, agent1, agent2, current_gogal
conv_id, agent1, agent2, current_goal
)
results = []
db_results = sorted(db_results, key=lambda x: x.rounds)
Expand Down
Loading
Loading