
Commit c78bd22

Gpts app v0.4 (#1170)
1 parent 63ab612 commit c78bd22

18 files changed: +65 -63 lines changed

assets/schema/dbgpt.sql

+3 -1

@@ -197,6 +197,8 @@ CREATE TABLE IF NOT EXISTS `prompt_manage`
 `sys_code` varchar(255) DEFAULT NULL COMMENT 'system app ',
 `created_at` datetime DEFAULT NULL COMMENT 'create time',
 `updated_at` datetime DEFAULT NULL COMMENT 'last update time',
+`team_mode` varchar(255) NULL COMMENT 'agent team work mode',
+
 PRIMARY KEY (`id`),
 UNIQUE KEY `uk_gpts_conversations` (`conv_id`),
 KEY `idx_gpts_name` (`gpts_name`)
@@ -230,7 +232,7 @@ CREATE TABLE `gpts_messages` (
 `model_name` varchar(255) DEFAULT NULL COMMENT 'message generate model',
 `rounds` int(11) NOT NULL COMMENT 'dialogue turns',
 `content` text COMMENT 'Content of the speech',
-`current_gogal` text COMMENT 'The target corresponding to the current message',
+`current_goal` text COMMENT 'The target corresponding to the current message',
 `context` text COMMENT 'Current conversation context',
 `review_info` text COMMENT 'Current conversation review info',
 `action_report` text COMMENT 'Current conversation action report',
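The schema change above only affects fresh installs that run dbgpt.sql from scratch; an existing database would need the gpts_messages column renamed and the new team_mode column added by hand. The sketch below is not part of the commit: it assumes MySQL, SQLAlchemy, a placeholder connection URL, and that team_mode belongs to the table whose uk_gpts_conversations key appears as context in the same hunk.

# Hypothetical one-off migration for an existing MySQL database; not part of this commit.
from sqlalchemy import create_engine, text

engine = create_engine("mysql+pymysql://user:password@localhost:3306/dbgpt")  # placeholder URL

with engine.begin() as conn:
    # Rename the misspelled column so it matches the renamed dataclass field and dict key.
    conn.execute(text(
        "ALTER TABLE gpts_messages "
        "CHANGE COLUMN current_gogal current_goal text "
        "COMMENT 'The target corresponding to the current message'"
    ))
    # The first hunk also adds team_mode; gpts_conversations is an assumption
    # inferred from the uk_gpts_conversations key shown as context.
    conn.execute(text(
        "ALTER TABLE gpts_conversations "
        "ADD COLUMN team_mode varchar(255) NULL COMMENT 'agent team work mode'"
    ))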

dbgpt/agent/agents/base_agent.py

+14 -14

@@ -220,7 +220,7 @@ def append_message(self, message: Optional[Dict], role, sender: Agent) -> bool:
 "context",
 "action_report",
 "review_info",
-"current_gogal",
+"current_goal",
 "model_name",
 )
 if k in message
@@ -246,7 +246,7 @@ def append_message(self, message: Optional[Dict], role, sender: Agent) -> bool:
 receiver=self.name,
 role=role,
 rounds=self.consecutive_auto_reply_counter,
-current_gogal=oai_message.get("current_gogal", None),
+current_goal=oai_message.get("current_goal", None),
 content=oai_message.get("content", None),
 context=json.dumps(oai_message["context"], ensure_ascii=False)
 if "context" in oai_message
@@ -458,16 +458,16 @@ def process_now_message(
 sender,
 rely_messages: Optional[List[Dict]] = None,
 ):
-current_gogal = current_message.get("current_gogal", None)
+current_goal = current_message.get("current_goal", None)
 ### Convert and tailor the information in collective memory into contextual memory available to the current Agent
-current_gogal_messages = self._gpts_message_to_ai_message(
+current_goal_messages = self._gpts_message_to_ai_message(
 self.memory.message_memory.get_between_agents(
-self.agent_context.conv_id, self.name, sender.name, current_gogal
+self.agent_context.conv_id, self.name, sender.name, current_goal
 )
 )
-if current_gogal_messages is None or len(current_gogal_messages) <= 0:
+if current_goal_messages is None or len(current_goal_messages) <= 0:
 current_message["role"] = ModelMessageRoleType.HUMAN
-current_gogal_messages = [current_message]
+current_goal_messages = [current_message]
 ### relay messages
 cut_messages = []
 if rely_messages:
@@ -479,13 +479,13 @@ def process_now_message(
 else:
 cut_messages.extend(self._rely_messages)
 
-if len(current_gogal_messages) < self.dialogue_memory_rounds:
-cut_messages.extend(current_gogal_messages)
+if len(current_goal_messages) < self.dialogue_memory_rounds:
+cut_messages.extend(current_goal_messages)
 else:
 # TODO: allocate historical information based on token budget
-cut_messages.extend(current_gogal_messages[:2])
+cut_messages.extend(current_goal_messages[:2])
 # end_round = self.dialogue_memory_rounds - 2
-cut_messages.extend(current_gogal_messages[-3:])
+cut_messages.extend(current_goal_messages[-3:])
 return cut_messages
 
 async def a_system_fill_param(self):
@@ -502,7 +502,7 @@ async def a_generate_reply(
 ## 0.New message build
 new_message = {}
 new_message["context"] = message.get("context", None)
-new_message["current_gogal"] = message.get("current_gogal", None)
+new_message["current_goal"] = message.get("current_goal", None)
 
 ## 1.LLM Reasonging
 await self.a_system_fill_param()
@@ -576,7 +576,7 @@ async def a_verify_reply(
 ## Send error messages to yourself for retrieval optimization and increase the number of retrievals
 retry_message = {}
 retry_message["context"] = message.get("context", None)
-retry_message["current_gogal"] = message.get("current_gogal", None)
+retry_message["current_goal"] = message.get("current_goal", None)
 retry_message["model_name"] = message.get("model_name", None)
 retry_message["content"] = fail_reason
 ## Use the original sender to send the retry message to yourself
@@ -603,7 +603,7 @@ async def a_retry_chat(
 "context": json.loads(last_message.context)
 if last_message.context
 else None,
-"current_gogal": last_message.current_gogal,
+"current_goal": last_message.current_goal,
 "review_info": json.loads(last_message.review_info)
 if last_message.review_info
 else None,
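append_message and the dict builders above share one convention: outgoing messages are plain dicts, and only a fixed tuple of keys is persisted to memory. The rename therefore has to land in the key tuple and in every producer at once. A minimal sketch of that filtering step, restricted to keys that appear in this diff:

# Illustrative only; mirrors the key-filtering comprehension in append_message.
PERSISTED_KEYS = (
    "content",
    "context",
    "action_report",
    "review_info",
    "current_goal",  # renamed from "current_gogal" in this commit
    "model_name",
)

message = {
    "content": "Plot monthly sales",
    "current_goal": "Plot monthly sales",
    "model_name": "proxyllm",
}

oai_message = {k: message[k] for k in PERSISTED_KEYS if k in message}
# A producer still emitting the old "current_gogal" key would have its goal
# silently dropped here, which is why all call sites change in the same commit.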

dbgpt/agent/agents/base_agent_new.py

+13 -13

@@ -323,7 +323,7 @@ async def a_initiate_chat(
 await self.a_send(
 {
 "content": context["message"],
-"current_gogal": context["message"],
+"current_goal": context["message"],
 },
 recipient,
 reviewer,
@@ -352,7 +352,7 @@ async def _a_append_message(
 "context",
 "action_report",
 "review_info",
-"current_gogal",
+"current_goal",
 "model_name",
 )
 if k in message
@@ -364,7 +364,7 @@ async def _a_append_message(
 receiver=self.profile,
 role=role,
 rounds=self.consecutive_auto_reply_counter,
-current_gogal=oai_message.get("current_gogal", None),
+current_goal=oai_message.get("current_goal", None),
 content=oai_message.get("content", None),
 context=json.dumps(oai_message["context"], ensure_ascii=False)
 if "context" in oai_message
@@ -501,7 +501,7 @@ def _init_reply_message(self, recive_message):
 """
 new_message = {}
 new_message["context"] = recive_message.get("context", None)
-new_message["current_gogal"] = recive_message.get("current_gogal", None)
+new_message["current_goal"] = recive_message.get("current_goal", None)
 return new_message
 
 def _convert_to_ai_message(
@@ -544,19 +544,19 @@ def _load_thinking_messages(
 sender,
 rely_messages: Optional[List[Dict]] = None,
 ) -> Optional[List[Dict]]:
-current_gogal = receive_message.get("current_gogal", None)
+current_goal = receive_message.get("current_goal", None)
 
 ### Convert and tailor the information in collective memory into contextual memory available to the current Agent
-current_gogal_messages = self._convert_to_ai_message(
+current_goal_messages = self._convert_to_ai_message(
 self.memory.message_memory.get_between_agents(
-self.agent_context.conv_id, self.profile, sender.profile, current_gogal
+self.agent_context.conv_id, self.profile, sender.profile, current_goal
 )
 )
 
 # When there is no target and context, the current received message is used as the target problem
-if current_gogal_messages is None or len(current_gogal_messages) <= 0:
+if current_goal_messages is None or len(current_goal_messages) <= 0:
 receive_message["role"] = ModelMessageRoleType.HUMAN
-current_gogal_messages = [receive_message]
+current_goal_messages = [receive_message]
 
 ### relay messages
 cut_messages = []
@@ -572,14 +572,14 @@ def _load_thinking_messages(
 cut_messages.extend(rely_messages)
 
 # TODO: allocate historical information based on token budget
-if len(current_gogal_messages) < 5:
-cut_messages.extend(current_gogal_messages)
+if len(current_goal_messages) < 5:
+cut_messages.extend(current_goal_messages)
 else:
 # For the time being, the smallest size of historical message records will be used by default.
 # Use the first two rounds of messages to understand the initial goals
-cut_messages.extend(current_gogal_messages[:2])
+cut_messages.extend(current_goal_messages[:2])
 # Use information from the last three rounds of communication to ensure that current thinking knows what happened and what to do in the last communication
-cut_messages.extend(current_gogal_messages[-3:])
+cut_messages.extend(current_goal_messages[-3:])
 return cut_messages
 
 def _new_system_message(self, content):
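_load_thinking_messages keeps a goal's full history only while it is short; otherwise it keeps the first two and the last three messages. A standalone sketch of that trimming rule (the threshold of 5 and the 2/3 split come from the diff; the function name and sample data are illustrative):

from typing import Dict, List

def trim_goal_messages(current_goal_messages: List[Dict], max_rounds: int = 5) -> List[Dict]:
    """Sketch of the history trimming performed in _load_thinking_messages."""
    cut_messages: List[Dict] = []
    if len(current_goal_messages) < max_rounds:
        # Short histories are forwarded unchanged.
        cut_messages.extend(current_goal_messages)
    else:
        # First two rounds keep the initial goal in view,
        # last three rounds keep the most recent exchange.
        cut_messages.extend(current_goal_messages[:2])
        cut_messages.extend(current_goal_messages[-3:])
    return cut_messages

history = [{"content": f"round {i}"} for i in range(7)]
assert len(trim_goal_messages(history)) == 5  # 2 oldest + 3 newest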

dbgpt/agent/agents/expand/code_assistant_agent.py

+1 -1

@@ -42,7 +42,7 @@ def __init__(self, **kwargs):
 self._init_actions([CodeAction])
 
 async def a_correctness_check(self, message: Optional[Dict]):
-task_gogal = message.get("current_gogal", None)
+task_gogal = message.get("current_goal", None)
 action_report = message.get("action_report", None)
 task_result = ""
 if action_report:

dbgpt/agent/agents/expand/retrieve_summary_assistant_agent.py

+3 -3

@@ -196,7 +196,7 @@ async def a_generate_reply(
 ## New message build
 new_message = {}
 new_message["context"] = current_messages[-1].get("context", None)
-new_message["current_gogal"] = current_messages[-1].get("current_gogal", None)
+new_message["current_goal"] = current_messages[-1].get("current_goal", None)
 new_message["role"] = "assistant"
 new_message["content"] = user_question
 new_message["model_name"] = model
@@ -206,7 +206,7 @@ async def a_generate_reply(
 ## Summary message build
 summary_message = {}
 summary_message["context"] = message.get("context", None)
-summary_message["current_gogal"] = message.get("current_gogal", None)
+summary_message["current_goal"] = message.get("current_goal", None)
 
 summaries = ""
 count = 0
@@ -262,7 +262,7 @@ async def a_generate_reply(
 
 async def a_verify(self, message: Optional[Dict]):
 self.update_system_message(self.CHECK_RESULT_SYSTEM_MESSAGE)
-current_goal = message.get("current_gogal", None)
+current_goal = message.get("current_goal", None)
 action_report = message.get("action_report", None)
 task_result = ""
 if action_report:

dbgpt/agent/agents/expand/summary_assistant_agent.py

+1 -1

@@ -35,7 +35,7 @@ def __init__(self, **kwargs):
 self._init_actions([BlankAction])
 
 # async def a_correctness_check(self, message: Optional[Dict]):
-# current_goal = message.get("current_gogal", None)
+# current_goal = message.get("current_goal", None)
 # action_report = message.get("action_report", None)
 # task_result = ""
 # if action_report:

dbgpt/agent/common/schema.py

+2 -2

@@ -43,7 +43,7 @@ class GptsMessage:
 role: str
 content: str
 rounds: Optional[int]
-current_gogal: str = None
+current_goal: str = None
 context: Optional[str] = None
 review_info: Optional[str] = None
 action_report: Optional[str] = None
@@ -61,7 +61,7 @@ def from_dict(d: Dict[str, Any]) -> GptsMessage:
 content=d["content"],
 rounds=d["rounds"],
 model_name=d["model_name"],
-current_gogal=d["current_gogal"],
+current_goal=d["current_goal"],
 context=d["context"],
 review_info=d["review_info"],
 action_report=d["action_report"],
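Rows and dicts written before this commit still carry the old key name, so code that deserializes historical records may want a small compatibility fallback. The sketch below is not in the commit: the class is trimmed to the fields visible in this diff, and the fallback to the legacy key is purely illustrative.

from dataclasses import dataclass
from typing import Any, Dict, Optional

@dataclass
class GptsMessageSketch:
    """Reduced stand-in for GptsMessage, limited to fields shown in the diff."""
    role: str
    content: str
    rounds: Optional[int]
    current_goal: Optional[str] = None
    context: Optional[str] = None
    review_info: Optional[str] = None
    action_report: Optional[str] = None
    model_name: Optional[str] = None

    @staticmethod
    def from_dict(d: Dict[str, Any]) -> "GptsMessageSketch":
        return GptsMessageSketch(
            role=d["role"],
            content=d["content"],
            rounds=d["rounds"],
            model_name=d.get("model_name"),
            # Accept the pre-rename key for records written by older versions.
            current_goal=d.get("current_goal", d.get("current_gogal")),
            context=d.get("context"),
            review_info=d.get("review_info"),
            action_report=d.get("action_report"),
        )

old_row = {"role": "assistant", "content": "done", "rounds": 3, "current_gogal": "build report"}
assert GptsMessageSketch.from_dict(old_row).current_goal == "build report"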

dbgpt/agent/memory/base.py

+3 -3

@@ -57,7 +57,7 @@ class GptsMessage:
 role: str
 content: str
 rounds: Optional[int]
-current_gogal: str = None
+current_goal: str = None
 context: Optional[str] = None
 review_info: Optional[str] = None
 action_report: Optional[str] = None
@@ -75,7 +75,7 @@ def from_dict(d: Dict[str, Any]) -> GptsMessage:
 content=d["content"],
 rounds=d["rounds"],
 model_name=d["model_name"],
-current_gogal=d["current_gogal"],
+current_goal=d["current_goal"],
 context=d["context"],
 review_info=d["review_info"],
 action_report=d["action_report"],
@@ -203,7 +203,7 @@ def get_between_agents(
 conv_id: str,
 agent1: str,
 agent2: str,
-current_gogal: Optional[str] = None,
+current_goal: Optional[str] = None,
 ) -> Optional[List[GptsMessage]]:
 """
 Query information related to an agent
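Note that get_between_agents renames a keyword parameter rather than a local variable, so any caller that passed the argument by name has to be updated along with the interface; positional callers are unaffected. A toy illustration (class, data, and return value are placeholders):

from typing import List, Optional

class MemorySketch:
    """Placeholder showing only the renamed parameter."""
    def get_between_agents(
        self,
        conv_id: str,
        agent1: str,
        agent2: str,
        current_goal: Optional[str] = None,  # was current_gogal before this commit
    ) -> List[str]:
        goals = ["plan", "code", "report"]
        return goals if current_goal is None else [g for g in goals if g == current_goal]

memory = MemorySketch()
memory.get_between_agents("conv-1", "Planner", "Coder", "code")               # positional: unaffected
memory.get_between_agents("conv-1", "Planner", "Coder", current_goal="code")  # keyword: must use new name
# memory.get_between_agents("conv-1", "Planner", "Coder", current_gogal="code")  # would now raise TypeError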

dbgpt/agent/memory/default_gpts_memory.py

+3 -3

@@ -100,11 +100,11 @@ def get_between_agents(
 conv_id: str,
 agent1: str,
 agent2: str,
-current_gogal: Optional[str] = None,
+current_goal: Optional[str] = None,
 ) -> Optional[List[GptsMessage]]:
-if current_gogal:
+if current_goal:
 result = self.df.query(
-f"conv_id==@conv_id and ((sender==@agent1 and receiver==@agent2) or (sender==@agent2 and receiver==@agent1)) and current_gogal==@current_gogal"
+f"conv_id==@conv_id and ((sender==@agent1 and receiver==@agent2) or (sender==@agent2 and receiver==@agent1)) and current_goal==@current_goal"
 )
 else:
 result = self.df.query(
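The in-memory store filters a pandas DataFrame with DataFrame.query, where @name binds to a local Python variable; the rename therefore has to appear both in the DataFrame's column and in the query string. A self-contained sketch of the same pattern with toy data:

import pandas as pd

df = pd.DataFrame(
    [
        {"conv_id": "c1", "sender": "Planner", "receiver": "Coder", "current_goal": "write sql"},
        {"conv_id": "c1", "sender": "Coder", "receiver": "Planner", "current_goal": "write sql"},
        {"conv_id": "c1", "sender": "Planner", "receiver": "Reporter", "current_goal": "summarize"},
    ]
)

conv_id, agent1, agent2, current_goal = "c1", "Planner", "Coder", "write sql"

# @var references the enclosing Python variables; both directions of the
# sender/receiver pair are kept, as in get_between_agents.
result = df.query(
    "conv_id == @conv_id and "
    "((sender == @agent1 and receiver == @agent2) or (sender == @agent2 and receiver == @agent1)) "
    "and current_goal == @current_goal"
)
assert len(result) == 2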

dbgpt/agent/memory/gpts_memory.py

+2 -2

@@ -58,7 +58,7 @@ async def one_chat_competions(self, conv_id: str):
 count = count + 1
 if count == 1:
 continue
-if not message.current_gogal or len(message.current_gogal) <= 0:
+if not message.current_goal or len(message.current_goal) <= 0:
 if len(temp_group) > 0:
 vis_items.append(await self._plan_vis_build(temp_group))
 temp_group.clear()
@@ -69,7 +69,7 @@ async def one_chat_competions(self, conv_id: str):
 vis_items.append(await self._messages_to_agents_vis(temp_messages))
 temp_messages.clear()
 
-last_gogal = message.current_gogal
+last_gogal = message.current_goal
 temp_group[last_gogal].append(message)
 
 if len(temp_group) > 0:
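one_chat_competions walks the conversation and buckets consecutive messages by their current_goal so that each planned goal can be rendered as one group. A simplified sketch of that grouping (message objects, goal strings, and the defaultdict container are illustrative; the real method also flushes groups into visualization items):

from collections import defaultdict
from dataclasses import dataclass
from typing import Optional

@dataclass
class Msg:  # stand-in for GptsMessage
    sender: str
    current_goal: Optional[str]

messages = [
    Msg("User", None),                 # no goal: handled outside any plan group
    Msg("Planner", "collect data"),
    Msg("Coder", "collect data"),
    Msg("Reporter", "write summary"),
]

temp_group = defaultdict(list)
ungrouped = []
for message in messages:
    if not message.current_goal or len(message.current_goal) <= 0:
        ungrouped.append(message)
        continue
    # Mirrors temp_group[last_gogal].append(message): messages sharing a goal
    # end up in the same bucket keyed by that goal.
    temp_group[message.current_goal].append(message)

assert [len(v) for v in temp_group.values()] == [2, 1]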

dbgpt/agent/memory/gpts_memory_storage.py

+6 -6

@@ -184,7 +184,7 @@ class GptsMessageStorage(StorageItem):
 role: str
 content: str
 rounds: Optional[int]
-current_gogal: str = None
+current_goal: str = None
 context: Optional[str] = None
 review_info: Optional[str] = None
 action_report: Optional[str] = None
@@ -204,7 +204,7 @@ def from_dict(d: Dict[str, Any]):
 content=d["content"],
 rounds=d["rounds"],
 model_name=d["model_name"],
-current_gogal=d["current_gogal"],
+current_goal=d["current_goal"],
 context=d["context"],
 review_info=d["review_info"],
 action_report=d["action_report"],
@@ -239,7 +239,7 @@ def to_gpts_message(self) -> GptsMessage:
 role=self.role,
 content=self.content,
 rounds=self.rounds,
-current_gogal=self.current_gogal,
+current_goal=self.current_goal,
 context=self.context,
 review_info=self.review_info,
 action_report=self.action_report,
@@ -258,7 +258,7 @@ def from_gpts_message(gpts_message: GptsMessage) -> "StoragePromptTemplate":
 role=gpts_message.role,
 content=gpts_message.content,
 rounds=gpts_message.rounds,
-current_gogal=gpts_message.current_gogal,
+current_goal=gpts_message.current_goal,
 context=gpts_message.context,
 review_info=gpts_message.review_info,
 action_report=gpts_message.action_report,
@@ -344,9 +344,9 @@ def get_between_agents(
 conv_id: str,
 agent1: str,
 agent2: str,
-current_gogal: Optional[str] = None,
+current_goal: Optional[str] = None,
 ) -> Optional[List[GptsMessage]]:
-return super().get_between_agents(conv_id, agent1, agent2, current_gogal)
+return super().get_between_agents(conv_id, agent1, agent2, current_goal)
 
 def get_by_conv_id(self, conv_id: str) -> Optional[List[GptsMessage]]:
 return super().get_by_conv_id(conv_id)

dbgpt/app/scene/chat_agent/chat.py

+1 -1

@@ -40,7 +40,7 @@ def __init__(self, chat_param: Dict):
 
 # load select plugin
 agent_module = CFG.SYSTEM_APP.get_component(
-ComponentType.AGENT_HUB, ModulePlugin
+ComponentType.PLUGIN_HUB, ModulePlugin
 )
 self.plugins_prompt_generator = agent_module.load_select_plugin(
 self.plugins_prompt_generator, self.select_plugins

dbgpt/serve/agent/agents/db_gpts_memory.py

+2 -2

@@ -94,10 +94,10 @@ def get_between_agents(
 conv_id: str,
 agent1: str,
 agent2: str,
-current_gogal: Optional[str] = None,
+current_goal: Optional[str] = None,
 ) -> Optional[List[GptsMessage]]:
 db_results = self.gpts_message.get_between_agents(
-conv_id, agent1, agent2, current_gogal
+conv_id, agent1, agent2, current_goal
 )
 results = []
 db_results = sorted(db_results, key=lambda x: x.rounds)
