Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore(build): Fix typo and new pre-commit config #987

Merged
merged 6 commits into from
Dec 28, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 23 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Please run command `pre-commit install` to install pre-commit hook
repos:
  # Hooks are defined locally (repo: local) and delegate to the project's
  # Makefile targets instead of pulling managed hook repositories.
  - repo: local
    hooks:
      # Runs `make fmt` (isort/black/blackdoc per the Makefile) on commit.
      - id: python-fmt
        name: Python Format
        entry: make fmt
        # `language: system` — uses the developer's environment as-is;
        # pre-commit does not create an isolated env for this hook.
        language: system
        # Skip generated/static frontend assets and the web/ package.
        exclude: '^dbgpt/app/static/|^web/'
        # Only trigger when Python files are part of the commit.
        types: [python]
        # NOTE(review): the `commit` stage name is deprecated in favor of
        # `pre-commit` since pre-commit 3.2 — confirm the minimum supported
        # pre-commit version before renaming.
        stages: [commit]
        # The Makefile targets operate on the whole tree, so the changed
        # filenames are not forwarded to the entry command.
        pass_filenames: false
        args: []
      # Runs `make test` (full unit-test suite) on commit.
      - id: python-test
        name: Python Unit Test
        entry: make test
        language: system
        exclude: '^dbgpt/app/static/|^web/'
        types: [python]
        stages: [commit]
        pass_filenames: false
        args: []

2 changes: 1 addition & 1 deletion CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ pip install -e ".[default]"
```
4. Install pre-commit hooks
```
pre-commit install --allow-missing-config
pre-commit install
```
5. Create a new branch for your changes using the following command:

Expand Down
14 changes: 13 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -24,11 +24,23 @@ fmt: setup ## Format Python code
# TODO: Use isort to sort Python imports.
# https://github.com/PyCQA/isort
# $(VENV_BIN)/isort .
$(VENV_BIN)/isort dbgpt/agent/
$(VENV_BIN)/isort dbgpt/core/
$(VENV_BIN)/isort dbgpt/serve/core/
$(VENV_BIN)/isort dbgpt/serve/agent/
$(VENV_BIN)/isort dbgpt/serve/conversation/
$(VENV_BIN)/isort dbgpt/serve/utils/_template_files
$(VENV_BIN)/isort --extend-skip="examples/notebook" examples
# https://github.com/psf/black
$(VENV_BIN)/black .
$(VENV_BIN)/black --extend-exclude="examples/notebook" .
# TODO: Use blackdoc to format Python doctests.
# https://blackdoc.readthedocs.io/en/latest/
# $(VENV_BIN)/blackdoc .
$(VENV_BIN)/blackdoc dbgpt/core/
$(VENV_BIN)/blackdoc dbgpt/agent/
$(VENV_BIN)/blackdoc dbgpt/model/
$(VENV_BIN)/blackdoc dbgpt/serve/
$(VENV_BIN)/blackdoc examples
# TODO: Type checking of Python code.
# https://github.com/python/mypy
# $(VENV_BIN)/mypy dbgpt
Expand Down
33 changes: 17 additions & 16 deletions dbgpt/agent/agents/agent.py
Original file line number Diff line number Diff line change
@@ -1,18 +1,16 @@
from __future__ import annotations

import dataclasses
from collections import defaultdict
from dataclasses import asdict, dataclass, fields
from typing import Any, Dict, List, Optional, Union
from typing import Any, Dict, List, Optional, Tuple, Union

from ..memory.gpts_memory import GptsMemory
from dbgpt.core import LLMClient
from dbgpt.core.interface.llm import ModelMetadata

from ..memory.gpts_memory import GptsMemory


class Agent:
"""
An interface for AI agent.
"""An interface for AI agent.
An agent can communicate with other agents and perform actions.
"""

Expand All @@ -33,16 +31,16 @@ def __init__(
self._memory = memory

@property
def name(self):
def name(self) -> str:
"""Get the name of the agent."""
return self._name

@property
def memory(self):
def memory(self) -> GptsMemory:
return self._memory

@property
def describe(self):
def describe(self) -> str:
"""Get the name of the agent."""
return self._describe

Expand All @@ -53,7 +51,7 @@ async def a_send(
reviewer: Agent,
request_reply: Optional[bool] = True,
is_recovery: Optional[bool] = False,
):
) -> None:
"""(Abstract async method) Send a message to another agent."""

async def a_receive(
Expand All @@ -64,21 +62,24 @@ async def a_receive(
request_reply: Optional[bool] = None,
silent: Optional[bool] = False,
is_recovery: Optional[bool] = False,
):
) -> None:
"""(Abstract async method) Receive a message from another agent."""

async def a_review(self, message: Union[Dict, str], censored: Agent):
async def a_review(
self, message: Union[Dict, str], censored: Agent
) -> Tuple[bool, Any]:
"""

Args:
message:
censored:

Returns:

bool: whether the message is censored
Any: the censored message
"""

def reset(self):
def reset(self) -> None:
"""(Abstract method) Reset the agent."""

async def a_generate_reply(
Expand Down Expand Up @@ -145,7 +146,7 @@ async def a_verify_reply(
"""


@dataclass
@dataclasses.dataclass
class AgentResource:
type: str
name: str
Expand All @@ -165,7 +166,7 @@ def to_dict(self) -> Dict[str, Any]:
return dataclasses.asdict(self)


@dataclass
@dataclasses.dataclass
class AgentContext:
conv_id: str
llm_provider: LLMClient
Expand Down
2 changes: 1 addition & 1 deletion dbgpt/agent/agents/agents_mange.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from collections import defaultdict
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Type, Union
from typing import Optional, Type

from .agent import Agent
from .expand.code_assistant_agent import CodeAssistantAgent
Expand Down
28 changes: 10 additions & 18 deletions dbgpt/agent/agents/base_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,22 +5,14 @@
from typing import Any, Callable, Dict, List, Optional, Type, Union

from dbgpt.agent.agents.llm.llm_client import AIWrapper
from dbgpt.core.awel import BaseOperator
from dbgpt.core.interface.message import ModelMessageRoleType
from dbgpt.util.error_types import LLMChatError
from dbgpt.util.utils import colored

from ..memory.base import GptsMessage
from ..memory.gpts_memory import GptsMemory
from .agent import Agent, AgentContext

try:
from termcolor import colored
except ImportError:

def colored(x, *args, **kwargs):
return x


logger = logging.getLogger(__name__)


Expand Down Expand Up @@ -249,13 +241,13 @@ def append_message(self, message: Optional[Dict], role, sender: Agent) -> bool:
rounds=self.consecutive_auto_reply_counter,
current_gogal=oai_message.get("current_gogal", None),
content=oai_message.get("content", None),
context=json.dumps(oai_message["context"])
context=json.dumps(oai_message["context"], ensure_ascii=False)
if "context" in oai_message
else None,
review_info=json.dumps(oai_message["review_info"])
review_info=json.dumps(oai_message["review_info"], ensure_ascii=False)
if "review_info" in oai_message
else None,
action_report=json.dumps(oai_message["action_report"])
action_report=json.dumps(oai_message["action_report"], ensure_ascii=False)
if "action_report" in oai_message
else None,
model_name=oai_message.get("model_name", None),
Expand Down Expand Up @@ -300,7 +292,7 @@ def _print_received_message(self, message: Union[Dict, str], sender: Agent):
print(message["content"], flush=True)
print(colored("*" * len(func_print), "green"), flush=True)
else:
content = json.dumps(message.get("content"))
content = json.dumps(message.get("content"), ensure_ascii=False)
if content is not None:
if "context" in message:
content = AIWrapper.instantiate(
Expand Down Expand Up @@ -360,7 +352,7 @@ def _gpts_message_to_ai_message(
self, gpts_messages: Optional[List[GptsMessage]]
) -> List[Dict]:
oai_messages: List[Dict] = []
###Based on the current agent, all messages received are user, and all messages sent are assistant.
# Based on the current agent, all messages received are user, and all messages sent are assistant.
for item in gpts_messages:
role = ""
if item.role:
Expand Down Expand Up @@ -390,21 +382,21 @@ def _gpts_message_to_ai_message(
return oai_messages

def process_now_message(self, sender, current_gogal: Optional[str] = None):
### Convert and tailor the information in collective memory into contextual memory available to the current Agent
# Convert and tailor the information in collective memory into contextual memory available to the current Agent
current_gogal_messages = self._gpts_message_to_ai_message(
self.memory.message_memory.get_between_agents(
self.agent_context.conv_id, self.name, sender.name, current_gogal
)
)

### relay messages
# relay messages
cut_messages = []
cut_messages.extend(self._rely_messages)

if len(current_gogal_messages) < self.dialogue_memory_rounds:
cut_messages.extend(current_gogal_messages)
else:
### TODO 基于token预算来分配历史信息
# TODO: allocate historical information based on token budget
cut_messages.extend(current_gogal_messages[:2])
# end_round = self.dialogue_memory_rounds - 2
cut_messages.extend(current_gogal_messages[-3:])
Expand Down Expand Up @@ -488,7 +480,7 @@ async def a_receive(
logger.info(
"The generated answer failed to verify, so send it to yourself for optimization."
)
### TODO 自优化最大轮次后,异常退出
# TODO: Exit after the maximum number of rounds of self-optimization
await sender.a_send(
message=reply, recipient=self, reviewer=reviewer, silent=silent
)
Expand Down
24 changes: 9 additions & 15 deletions dbgpt/agent/agents/expand/code_assistant_agent.py
Original file line number Diff line number Diff line change
@@ -1,21 +1,14 @@
import json
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Type, Union
from typing import Callable, Dict, Literal, Optional, Union

from dbgpt.core.awel import BaseOperator
from dbgpt.core.interface.message import ModelMessageRoleType
from dbgpt.util.code_utils import UNKNOWN, execute_code, extract_code, infer_lang
from dbgpt.util.string_utils import str_to_bool
from dbgpt.util.utils import colored

from ...memory.gpts_memory import GptsMemory
from ..agent import Agent, AgentContext
from ..base_agent import ConversableAgent
from dbgpt.core.interface.message import ModelMessageRoleType

try:
from termcolor import colored
except ImportError:

def colored(x, *args, **kwargs):
return x


class CodeAssistantAgent(ConversableAgent):
Expand Down Expand Up @@ -109,7 +102,7 @@ def _vis_code_idea(self, code, exit_success, log, language):
param["code"] = code
param["log"] = log

return f"```vis-code\n{json.dumps(param)}\n```"
return f"```vis-code\n{json.dumps(param, ensure_ascii=False)}\n```"

async def generate_code_execution_reply(
self,
Expand Down Expand Up @@ -171,7 +164,7 @@ async def a_verify(self, message: Optional[Dict]):
if action_report:
task_result = action_report.get("content", "")

check_reult, model = await self.a_reasoning_reply(
check_result, model = await self.a_reasoning_reply(
[
{
"role": ModelMessageRoleType.HUMAN,
Expand All @@ -183,11 +176,11 @@ async def a_verify(self, message: Optional[Dict]):
}
]
)
sucess = str_to_bool(check_reult)
success = str_to_bool(check_result)
fail_reason = None
if sucess == False:
if not success:
fail_reason = "The execution result of the code you wrote is judged as not answering the task question. Please re-understand and complete the task."
return sucess, fail_reason
return success, fail_reason

@property
def use_docker(self) -> Union[bool, str, None]:
Expand Down Expand Up @@ -219,6 +212,7 @@ def run_code(self, code, **kwargs):
def execute_code_blocks(self, code_blocks):
"""Execute the code blocks and return the result."""
logs_all = ""
exitcode = -1
for i, code_block in enumerate(code_blocks):
lang, code = code_block
if not lang:
Expand Down
29 changes: 10 additions & 19 deletions dbgpt/agent/agents/expand/dashboard_assistant_agent.py
Original file line number Diff line number Diff line change
@@ -1,24 +1,15 @@
import json
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Type, Union
from typing import Callable, Dict, Literal, Optional, Union

from dbgpt._private.config import Config
from dbgpt.agent.plugin.commands.command_mange import ApiCall
from dbgpt.util.json_utils import find_json_objects

from ...memory.gpts_memory import GptsMemory
from ..agent import Agent, AgentContext
from ..base_agent import ConversableAgent

try:
from termcolor import colored
except ImportError:

def colored(x, *args, **kwargs):
return x


from dbgpt._private.config import Config
from dbgpt.core.awel import BaseOperator

# TODO: remove global config
CFG = Config()


Expand Down Expand Up @@ -93,29 +84,29 @@ async def generate_dashboard_reply(
"Please recheck your answer,no usable plans generated in correct format,"
)
json_count = len(json_objects)
rensponse_succ = True
response_success = True
view = None
content = None
if json_count != 1:
### Answer failed, turn on automatic repair
# Answer failed, turn on automatic repair
fail_reason += f"There are currently {json_count} json contents"
rensponse_succ = False
response_success = False
else:
try:
chart_objs = json_objects[0]
content = json.dumps(chart_objs)
content = json.dumps(chart_objs, ensure_ascii=False)
vis_client = ApiCall()
view = vis_client.display_dashboard_vis(
chart_objs, self.db_connect.run_to_df
)
except Exception as e:
fail_reason += f"Return json structure error and cannot be converted to a sql-rendered chart,{str(e)}"
rensponse_succ = False
response_success = False

if not rensponse_succ:
if not response_success:
content = fail_reason
return True, {
"is_exe_success": rensponse_succ,
"is_exe_success": response_success,
"content": content,
"view": view,
}
Loading