diff --git a/agentverse/agents/base.py b/agentverse/agents/base.py index 4f118e08c..1cc2cbb49 100644 --- a/agentverse/agents/base.py +++ b/agentverse/agents/base.py @@ -4,8 +4,9 @@ from string import Template from pydantic import BaseModel, Field - from agentverse.llms import BaseLLM + +from agentverse.llms.utils import count_string_tokens from agentverse.memory import BaseMemory, ChatHistoryMemory from agentverse.message import Message from agentverse.output_parser import OutputParser @@ -62,7 +63,20 @@ def get_all_prompts(self, **kwargs): **kwargs ) append_prompt = Template(self.append_prompt_template).safe_substitute(**kwargs) - return prepend_prompt, append_prompt + + # TODO: self.llm.args.model is not generalizable + num_prepend_prompt_token = count_string_tokens( + prepend_prompt, self.llm.args.model + ) + num_append_prompt_token = count_string_tokens( + append_prompt, self.llm.args.model + ) + + return ( + prepend_prompt, + append_prompt, + num_prepend_prompt_token + num_append_prompt_token, + ) def get_receiver(self) -> Set[str]: return self.receiver diff --git a/agentverse/agents/tasksolving_agent/critic.py b/agentverse/agents/tasksolving_agent/critic.py index 6cbe46148..5133e82e8 100644 --- a/agentverse/agents/tasksolving_agent/critic.py +++ b/agentverse/agents/tasksolving_agent/critic.py @@ -62,7 +62,7 @@ async def astep( ) -> CriticMessage: """Asynchronous version of step""" logger.debug("", self.name, Fore.MAGENTA) - prepend_prompt, append_prompt = self.get_all_prompts( + prepend_prompt, append_prompt, prompt_token = self.get_all_prompts( preliminary_solution=preliminary_solution, advice=advice, task_description=task_description, @@ -72,7 +72,16 @@ async def astep( # tool_names=self.tool_names, tool_descriptions=self.tool_descriptions, ) - history = self.memory.to_messages(self.name, start_index=-self.max_history) + + max_send_token = self.llm.send_token_limit(self.llm.args.model) + max_send_token -= prompt_token + + history = await self.memory.to_messages( + self.name, + start_index=-self.max_history, + max_send_token=max_send_token, + model=self.llm.args.model, + ) parsed_response: Union[AgentCriticism, None] = None for i in range(self.max_retry): try: diff --git a/agentverse/agents/tasksolving_agent/evaluator.py b/agentverse/agents/tasksolving_agent/evaluator.py index abd40642b..a97455b0a 100644 --- a/agentverse/agents/tasksolving_agent/evaluator.py +++ b/agentverse/agents/tasksolving_agent/evaluator.py @@ -19,6 +19,8 @@ @agent_registry.register("evaluator") class EvaluatorAgent(BaseAgent): + max_history: int = 5 + def step( self, solution: str, @@ -26,18 +28,38 @@ def step( task_description: str, all_role_description: str, ) -> EvaluatorMessage: + pass + # return parsed_response + + async def astep( + self, + solution: str, + result: str, + task_description: str, + all_role_description: str, + ) -> EvaluatorMessage: + """Asynchronous version of step""" logger.debug("", self.name, Fore.MAGENTA) - prepend_prompt, append_prompt = self.get_all_prompts( + prepend_prompt, append_prompt, prompt_token = self.get_all_prompts( solution=solution, result=result, task_description=task_description, all_role_description=all_role_description, ) - history = self.memory.to_messages(self.name) + + max_send_token = self.llm.send_token_limit(self.llm.args.model) + max_send_token -= prompt_token + + history = await self.memory.to_messages( + self.name, + start_index=-self.max_history, + max_send_token=max_send_token, + model=self.llm.args.model, + ) parsed_response = None for i in 
range(self.max_retry): try: - response = self.llm.generate_response( + response = await self.llm.agenerate_response( prepend_prompt, history, append_prompt ) parsed_response = self.output_parser.parse(response) @@ -58,11 +80,6 @@ def step( advice=parsed_response[1] if parsed_response is not None else "", ) return message - # return parsed_response - - async def astep(self, solution: str) -> EvaluatorMessage: - """Asynchronous version of step""" - pass def _fill_prompt_template(self, solution: str, task_description: str) -> str: """Fill the placeholders in the prompt template diff --git a/agentverse/agents/tasksolving_agent/executor.py b/agentverse/agents/tasksolving_agent/executor.py index 38294453d..50bd237df 100644 --- a/agentverse/agents/tasksolving_agent/executor.py +++ b/agentverse/agents/tasksolving_agent/executor.py @@ -23,65 +23,28 @@ class ExecutorAgent(BaseAgent): def step( self, task_description: str, solution: str, tools: List[dict] = [], **kwargs ) -> ExecutorMessage: - logger.debug("", self.name, Fore.MAGENTA) - prepend_prompt, append_prompt = self.get_all_prompts( - task_description=task_description, - solution=solution, - agent_name=self.name, - **kwargs, - ) - - history = self.memory.to_messages(self.name, start_index=-self.max_history) - parsed_response = None - for i in range(self.max_retry): - try: - response = self.llm.generate_response( - prepend_prompt, history, append_prompt, tools - ) - parsed_response = self.output_parser.parse(response) - break - except (KeyboardInterrupt, bdb.BdbQuit): - raise - except Exception as e: - logger.error(e) - logger.warn("Retrying...") - continue - - if parsed_response is None: - logger.error(f"{self.name} failed to generate valid response.") - if isinstance(parsed_response, AgentFinish): - message = ExecutorMessage( - content=parsed_response.return_values["output"], - sender=self.name, - sender_agent=self, - ) - elif isinstance(parsed_response, AgentAction): - message = ExecutorMessage( - content=parsed_response.log, - sender=self.name, - sender_agent=self, - tool_name=parsed_response.tool, - tool_input=parsed_response.tool_input, - ) - else: - raise ValueError( - f"Error response type: {type(parsed_response)}. Only support \ - AgentFinish and AgentAction. Modify your output parser." 
- ) - return message + pass async def astep( self, task_description: str, solution: str, tools: List[dict] = [], **kwargs ) -> ExecutorMessage: logger.debug("", self.name, Fore.MAGENTA) - prepend_prompt, append_prompt = self.get_all_prompts( + prepend_prompt, append_prompt, prompt_token = self.get_all_prompts( task_description=task_description, solution=solution, agent_name=self.name, **kwargs, ) - history = self.memory.to_messages(self.name, start_index=-self.max_history) + max_send_token = self.llm.send_token_limit(self.llm.args.model) + max_send_token -= prompt_token + + history = await self.memory.to_messages( + self.name, + start_index=-self.max_history, + max_send_token=max_send_token, + model=self.llm.args.model, + ) parsed_response = None for i in range(self.max_retry): try: diff --git a/agentverse/agents/tasksolving_agent/role_assigner.py b/agentverse/agents/tasksolving_agent/role_assigner.py index 9b93833e6..ffec70ef7 100644 --- a/agentverse/agents/tasksolving_agent/role_assigner.py +++ b/agentverse/agents/tasksolving_agent/role_assigner.py @@ -19,20 +19,37 @@ @agent_registry.register("role_assigner") class RoleAssignerAgent(BaseAgent): + max_history: int = 5 + def step( self, advice: str, task_description: str, cnt_critic_agents: int ) -> RoleAssignerMessage: + pass + + async def astep( + self, advice: str, task_description: str, cnt_critic_agents: int + ) -> RoleAssignerMessage: + """Asynchronous version of step""" logger.debug("", self.name, Fore.MAGENTA) - prepend_prompt, append_prompt = self.get_all_prompts( + prepend_prompt, append_prompt, prompt_token = self.get_all_prompts( advice=advice, task_description=task_description, cnt_critic_agents=cnt_critic_agents, ) - history = self.memory.to_messages(self.name) + + max_send_token = self.llm.send_token_limit(self.llm.args.model) + max_send_token -= prompt_token + + history = await self.memory.to_messages( + self.name, + start_index=-self.max_history, + max_send_token=max_send_token, + model=self.llm.args.model, + ) parsed_response = None for i in range(self.max_retry): try: - response = self.llm.generate_response( + response = await self.llm.agenerate_response( prepend_prompt, history, append_prompt ) parsed_response = self.output_parser.parse(response) @@ -58,10 +75,6 @@ def step( ) return message - async def astep(self, env_description: str = "") -> RoleAssignerMessage: - """Asynchronous version of step""" - pass - def _fill_prompt_template( self, advice, task_description: str, cnt_critic_agents: int ) -> str: diff --git a/agentverse/agents/tasksolving_agent/solver.py b/agentverse/agents/tasksolving_agent/solver.py index 6db77561d..42ec5ba6a 100644 --- a/agentverse/agents/tasksolving_agent/solver.py +++ b/agentverse/agents/tasksolving_agent/solver.py @@ -15,33 +15,47 @@ from agentverse.agents.base import BaseAgent from agentverse.utils import AgentCriticism - logger = get_logger() @agent_registry.register("solver") class SolverAgent(BaseAgent): - max_history: int = 3 + max_history: int = 5 def step( self, former_solution: str, advice: str, task_description: str = "", **kwargs ) -> SolverMessage: + pass + + async def astep( + self, former_solution: str, advice: str, task_description: str = "", **kwargs + ) -> SolverMessage: + """Asynchronous version of step""" logger.debug("", self.name, Fore.MAGENTA) # prompt = self._fill_prompt_template( # former_solution, critic_opinions, advice, task_description # ) - prepend_prompt, append_prompt = self.get_all_prompts( + prepend_prompt, append_prompt, prompt_token = 
self.get_all_prompts( former_solution=former_solution, task_description=task_description, advice=advice, role_description=self.role_description, **kwargs, ) - history = self.memory.to_messages(self.name, start_index=-self.max_history) + + max_send_token = self.llm.send_token_limit(self.llm.args.model) + max_send_token -= prompt_token + + history = await self.memory.to_messages( + self.name, + start_index=-self.max_history, + max_send_token=max_send_token, + model=self.llm.args.model, + ) parsed_response = None for i in range(self.max_retry): try: - response = self.llm.generate_response( + response = await self.llm.agenerate_response( prepend_prompt, history, append_prompt ) parsed_response = self.output_parser.parse(response) @@ -65,10 +79,6 @@ def step( ) return message - async def astep(self, env_description: str = "") -> SolverMessage: - """Asynchronous version of step""" - pass - def _fill_prompt_template( self, former_solution: str, diff --git a/agentverse/environments/tasksolving_env/basic.py b/agentverse/environments/tasksolving_env/basic.py index 8e4631a24..457430311 100644 --- a/agentverse/environments/tasksolving_env/basic.py +++ b/agentverse/environments/tasksolving_env/basic.py @@ -51,7 +51,7 @@ async def step( logger.info(f"Loop Round {self.cnt_turn}") # ================== EXPERT RECRUITMENT ================== - agents = self.rule.role_assign( + agents = await self.rule.role_assign( self.task_description, self.agents, self.cnt_turn, advice ) description = "\n".join([agent.role_description for agent in agents]) @@ -79,7 +79,7 @@ async def step( # ================== EXECUTION ================== # ================== EVALUATION ================== - score, advice = self.rule.evaluate( + score, advice = await self.rule.evaluate( self.task_description, self.agents, plan, result ) logs.append( diff --git a/agentverse/environments/tasksolving_env/rules/base.py b/agentverse/environments/tasksolving_env/rules/base.py index 2e25c017c..a1fcb0dc9 100644 --- a/agentverse/environments/tasksolving_env/rules/base.py +++ b/agentverse/environments/tasksolving_env/rules/base.py @@ -68,7 +68,7 @@ def build_components(config: Dict, registry): **kwargs, ) - def role_assign( + async def role_assign( self, task_description: str, agents: List[BaseAgent], @@ -79,7 +79,7 @@ def role_assign( if self.role_assign_only_once and cnt_turn > 0: agents = [agents[AGENT_TYPES.SOLVER]] + agents[AGENT_TYPES.CRITIC] else: - agents = self.role_assigner.step( + agents = await self.role_assigner.astep( role_assigner=agents[AGENT_TYPES.ROLE_ASSIGNMENT], group_members=[agents[AGENT_TYPES.SOLVER]] + agents[AGENT_TYPES.CRITIC], advice=advice, @@ -137,7 +137,7 @@ async def execute( agents[AGENT_TYPES.SOLVER].add_message_to_memory(results) return results - def evaluate( + async def evaluate( self, task_description: str, agents: List[BaseAgent], @@ -162,7 +162,7 @@ def evaluate( # logger.error("Bad response from human evaluator!") # return ([comprehensiveness, detailedness, feasibility, novelty], advice) # else: - evaluation = self.evaluator.step( + evaluation = await self.evaluator.astep( agent=agents[AGENT_TYPES.EVALUATION], solution=solution, result=result, diff --git a/agentverse/environments/tasksolving_env/rules/decision_maker/brainstorming.py b/agentverse/environments/tasksolving_env/rules/decision_maker/brainstorming.py index a6db1a5f6..39bd35ae9 100644 --- a/agentverse/environments/tasksolving_env/rules/decision_maker/brainstorming.py +++ b/agentverse/environments/tasksolving_env/rules/decision_maker/brainstorming.py 
@@ -53,7 +53,7 @@ async def astep( Fore.YELLOW, ) - result = agents[0].step(previous_plan, advice, task_description) + result = await agents[0].astep(previous_plan, advice, task_description) for agent in agents: agent.memory.reset() self.broadcast_messages( diff --git a/agentverse/environments/tasksolving_env/rules/decision_maker/central.py b/agentverse/environments/tasksolving_env/rules/decision_maker/central.py index 5d7bf5702..1d682e3da 100644 --- a/agentverse/environments/tasksolving_env/rules/decision_maker/central.py +++ b/agentverse/environments/tasksolving_env/rules/decision_maker/central.py @@ -47,7 +47,7 @@ async def astep( ), ) agents[1].add_message_to_memory([result]) - result = agents[0].step( + result = await agents[0].astep( previous_plan, advice, task_description, chat_record=result.content ) return [result] diff --git a/agentverse/environments/tasksolving_env/rules/decision_maker/concurrent.py b/agentverse/environments/tasksolving_env/rules/decision_maker/concurrent.py index cc34e00b7..f3979b851 100644 --- a/agentverse/environments/tasksolving_env/rules/decision_maker/concurrent.py +++ b/agentverse/environments/tasksolving_env/rules/decision_maker/concurrent.py @@ -59,7 +59,7 @@ async def astep( last_reviews = nonempty_reviews agents[0].add_message_to_memory(last_reviews) - result = agents[0].step(previous_plan, advice, task_description) + result = await agents[0].astep(previous_plan, advice, task_description) # agents[0].add_message_to_memory([result]) self.broadcast_messages(agents, [result]) return [result] diff --git a/agentverse/environments/tasksolving_env/rules/decision_maker/dynamic.py b/agentverse/environments/tasksolving_env/rules/decision_maker/dynamic.py index d6b6d72fe..c1fccf923 100644 --- a/agentverse/environments/tasksolving_env/rules/decision_maker/dynamic.py +++ b/agentverse/environments/tasksolving_env/rules/decision_maker/dynamic.py @@ -56,7 +56,7 @@ async def astep( # Fore.YELLOW, # ) - previous_sentence = manager.step( + previous_sentence = await manager.astep( previous_plan, review, advice, task_description, previous_sentence ) reviews.append(previous_sentence) @@ -76,7 +76,7 @@ async def astep( nonempty_reviews.append(review) agents[0].add_message_to_memory(nonempty_reviews) - result = agents[0].step(previous_plan, advice, task_description) + result = await agents[0].astep(previous_plan, advice, task_description) return [result] diff --git a/agentverse/environments/tasksolving_env/rules/decision_maker/horizontal.py b/agentverse/environments/tasksolving_env/rules/decision_maker/horizontal.py index b2a8c5703..ef9f43108 100644 --- a/agentverse/environments/tasksolving_env/rules/decision_maker/horizontal.py +++ b/agentverse/environments/tasksolving_env/rules/decision_maker/horizontal.py @@ -50,7 +50,7 @@ async def astep( Fore.YELLOW, ) - result = agents[0].step(previous_plan, advice, task_description) + result = await agents[0].astep(previous_plan, advice, task_description) return [result] def reset(self): diff --git a/agentverse/environments/tasksolving_env/rules/decision_maker/horizontal_tool.py b/agentverse/environments/tasksolving_env/rules/decision_maker/horizontal_tool.py index 5cea85eab..b30e88046 100644 --- a/agentverse/environments/tasksolving_env/rules/decision_maker/horizontal_tool.py +++ b/agentverse/environments/tasksolving_env/rules/decision_maker/horizontal_tool.py @@ -77,7 +77,9 @@ async def astep( if end_flag: break - result: SolverMessage = agents[0].step(previous_plan, advice, task_description) + result: SolverMessage = await 
agents[0].astep( + previous_plan, advice, task_description + ) result_list = [] for res in result.content: res_tmp = deepcopy(result) diff --git a/agentverse/environments/tasksolving_env/rules/decision_maker/vertical.py b/agentverse/environments/tasksolving_env/rules/decision_maker/vertical.py index d8adf594d..0bcc4688f 100644 --- a/agentverse/environments/tasksolving_env/rules/decision_maker/vertical.py +++ b/agentverse/environments/tasksolving_env/rules/decision_maker/vertical.py @@ -50,7 +50,7 @@ async def astep( if not review.is_agree and review.content != "": nonempty_reviews.append(review) agents[0].add_message_to_memory(nonempty_reviews) - result = agents[0].step(previous_plan, advice, task_description) + result = await agents[0].astep(previous_plan, advice, task_description) agents[0].add_message_to_memory([result]) return [result] diff --git a/agentverse/environments/tasksolving_env/rules/decision_maker/vertical_solver_first.py b/agentverse/environments/tasksolving_env/rules/decision_maker/vertical_solver_first.py index 97114f455..c3c38af8e 100644 --- a/agentverse/environments/tasksolving_env/rules/decision_maker/vertical_solver_first.py +++ b/agentverse/environments/tasksolving_env/rules/decision_maker/vertical_solver_first.py @@ -38,7 +38,7 @@ async def astep( self.broadcast_messages( agents, [Message(content=advice, sender="Evaluator")] ) - previous_plan = agents[0].step(previous_plan, advice, task_description) + previous_plan = await agents[0].astep(previous_plan, advice, task_description) self.broadcast_messages(agents, [previous_plan]) logger.info("", f"Initial Plan:\n{previous_plan.content}", Fore.BLUE) for i in range(self.max_inner_turns): @@ -65,7 +65,7 @@ async def astep( logger.info("", "Consensus Reached!.", Fore.GREEN) break self.broadcast_messages(agents, nonempty_reviews) - previous_plan = agents[0].step(previous_plan, advice, task_description) + previous_plan = await agents[0].astep(previous_plan, advice, task_description) logger.info("", f"Updated Plan:\n{previous_plan.content}", Fore.BLUE) self.broadcast_messages(agents, [previous_plan]) result = previous_plan diff --git a/agentverse/environments/tasksolving_env/rules/evaluator/base.py b/agentverse/environments/tasksolving_env/rules/evaluator/base.py index f3d72ad98..96a00ff59 100644 --- a/agentverse/environments/tasksolving_env/rules/evaluator/base.py +++ b/agentverse/environments/tasksolving_env/rules/evaluator/base.py @@ -20,7 +20,7 @@ class BaseEvaluator(BaseModel): """ @abstractmethod - def step( + async def astep( self, agent: EvaluatorAgent, solution: List[SolverMessage], @@ -38,7 +38,7 @@ def reset(self): @evaluator_registry.register("none") class NoneEvaluator(BaseEvaluator): - def step( + async def astep( self, agent: EvaluatorAgent, solution: List[SolverMessage], @@ -56,7 +56,7 @@ def step( @evaluator_registry.register("dummy") class DummyEvaluator(BaseEvaluator): - def step( + async def astep( self, agent: EvaluatorAgent, solution: List[SolverMessage], @@ -72,7 +72,7 @@ def step( @evaluator_registry.register("dummy") class DummyEvaluator(BaseEvaluator): - def step( + async def astep( self, agent: EvaluatorAgent, solution: List[str] | str, diff --git a/agentverse/environments/tasksolving_env/rules/evaluator/basic.py b/agentverse/environments/tasksolving_env/rules/evaluator/basic.py index a7738fe21..847234f03 100644 --- a/agentverse/environments/tasksolving_env/rules/evaluator/basic.py +++ b/agentverse/environments/tasksolving_env/rules/evaluator/basic.py @@ -14,7 +14,7 @@ class 
BasicEvaluator(BaseEvaluator): cnt_agents: int = 0 - def step( + async def astep( self, agent: EvaluatorAgent, solution: List[SolverMessage], @@ -27,7 +27,7 @@ def step( flatten_solution = "\n".join([s.content for s in solution]) flatten_result = "\n".join([r.content for r in result]) flatten_all_role_description = "\n".join(all_role_description) - evaluation = agent.step( + evaluation = await agent.astep( flatten_solution, flatten_result, task_description, @@ -40,7 +40,7 @@ def step( class BasicEvaluator(BaseEvaluator): cnt_agents: int = 0 - def step( + async def astep( self, agent: EvaluatorAgent, solution: List[SolverMessage], @@ -54,7 +54,7 @@ def step( flatten_result = "\n".join([r.content for r in result]) flatten_all_role_description = "\n".join(all_role_description) agent.add_message_to_memory(result) - evaluation = agent.step( + evaluation = await agent.astep( flatten_solution, flatten_result, task_description, diff --git a/agentverse/environments/tasksolving_env/rules/executor/code_test.py b/agentverse/environments/tasksolving_env/rules/executor/code_test.py index 121aabc67..1a60b720e 100644 --- a/agentverse/environments/tasksolving_env/rules/executor/code_test.py +++ b/agentverse/environments/tasksolving_env/rules/executor/code_test.py @@ -71,35 +71,7 @@ def step( *args, **kwargs, ) -> Any: - solution = solution[0].content - os.makedirs("tmp", exist_ok=True) - self.write_to_file("tmp/main.py", solution) - manager = multiprocessing.Manager() - result = manager.list() - if task_description not in self.has_test: - response = agent.step(task_description, solution).content - self.write_to_file(response["file_path"], response["code"]) - self.has_test[task_description] = f"python {response['file_path']}" - p = multiprocessing.Process( - target=execute_command, args=(f"python {response['file_path']}", result) - ) - p.start() - p.join(timeout=self.timeout + 1) - if p.is_alive(): - p.kill() - # result = execute_command(f"python {response['file_path']}") - else: - # result = execute_command(self.has_test[task_description]) - p = multiprocessing.Process( - target=execute_command, args=(self.has_test[task_description], result) - ) - p.start() - p.join(timeout=self.timeout + 1) - if p.is_alive(): - p.kill() - if not result: - result.append("Execution timed out.") - return [ExecutorMessage(content=result[0], sender="Code Tester")] + pass def write_to_file(self, file_name, file_content): # TODO: generalize this method to a common tool diff --git a/agentverse/environments/tasksolving_env/rules/executor/tool_using.py b/agentverse/environments/tasksolving_env/rules/executor/tool_using.py index 65bc8ff24..9ae70a78d 100644 --- a/agentverse/environments/tasksolving_env/rules/executor/tool_using.py +++ b/agentverse/environments/tasksolving_env/rules/executor/tool_using.py @@ -13,7 +13,7 @@ from . 
import BaseExecutor, executor_registry import asyncio - +from agentverse.llms.utils.jsonrepair import JsonRepair url = "http://127.0.0.1:8080" @@ -85,7 +85,6 @@ async def astep( self.agent_names.append(name) plan_this_turn[name] = plans[i].content.split("-")[1].strip() agent_name_this_turn.append(name) - # agents = [deepcopy(agent) for _ in range(len(plans))] if self.tool_retrieval: # We retrieve 5 related tools for each agent @@ -109,7 +108,6 @@ async def astep( # Record the indices of agents that have finished their tasks # so that they will not be called again finished_agent_names = set() - # result = ["" for _ in range(len(plan_this_turn))] result = {name: "" for name in agent_name_this_turn} for current_turn in range(self.max_tool_call_times): if len(finished_agent_names) == len(agent_name_this_turn): @@ -224,10 +222,59 @@ async def _summarize_webpage(webpage, question): response = await openai.ChatCompletion.acreate( messages=[{"role": "user", "content": summarize_prompt}], model="gpt-3.5-turbo-16k", + functions=[ + { + "name": "parse_web_text", + "description": "Parse the text of the webpage based on the question. Extract all related information about `Question` from the webpage. ! Don't provide information that is not shown in the webpage! ! Don't provide your own opinion!", + "parameters": { + "type": "object", + "properties": { + "summary": { + "type": "string", + "description": "Summary of the webpage with 50 words. Make sure all important information about `Question` is included. ! Don't provide information that is not shown in the webpage! ! Don't provide your own opinion!", + }, + "related_details": { + "type": "string", + "description": "List all webpage details related to the question. Maximum 400 words. ! Don't provide information that is not shown in the webpage! ! Don't provide your own opinion!", + }, + "useful_hyperlinks": { + "type": "array", + "description": "Maximum 3 items. Select useful hyperlinks in the webpage that relate to the question. Make sure the url is useful for further browsing. Don't provide repeated hyperlinks.", + "items": { + "type": "string", + "description": "! Don't provide hyperlinks that are not shown in the webpage! ! Don't provide your own opinion!", + }, + }, + }, + "required": [ + "summary", + "related_details", + "useful_hyperlinks", + ], + }, + } + ], + function_call={"name": "parse_web_text"}, ) - except: + except Exception as e: + logger.error("Failed to call the tool. Exception: " + str(e)) continue - return response["choices"][0]["message"]["content"] + arguments = ast.literal_eval( + JsonRepair( + response["choices"][0]["message"]["function_call"]["arguments"] + ).repair() + ) + ret = ( + "summary: " + + arguments["summary"] + + "\nrelated_details: " + + arguments["related_details"] + + "\nuseful_hyperlinks: [" + + ",".join(arguments["useful_hyperlinks"]) + + "]\n" + ) + + return ret if command == "submit_task": return { @@ -251,7 +298,6 @@ async def _summarize_webpage(webpage, question): "is_finish": False, "cookies": cookies, } - for i in range(3): try: async with ClientSession(cookies=cookies, trust_env=True) as session: @@ -277,20 +323,38 @@ async def _summarize_webpage(webpage, question): async with session.post( f"{url}/execute_tool", json=payload, - headers={ - "toolbench_key": "p5ZASSLBO0EknAQLE5ecNZ7kq5i1YfY9eoWUXNxL3TM6lXwdXs" - }, timeout=30, ) as response: content = await response.text() if command == "WebEnv_browse_website": openai.aiosession.set(session) - content = await _summarize_webpage( - content, arguments["question"] + result = await _summarize_webpage( + content, arguments["goals_to_browse"] ) + elif command == "WebEnv_search_and_browse": + openai.aiosession.set(session) + content = json.loads(content) + # for i in range(len(content)): + summarized = await asyncio.gather( + *[ + _summarize_webpage( + content[i]["page"], arguments["goals_to_browse"] + ) + for i in range(len(content)) + ] + ) + for i in range(len(content)): + content[i]["page"] = summarized[i] + result = "" + for i in range(len(content)): + result += f"SEARCH_RESULT {i}:\n" + result += content[i]["page"].strip() + "\n\n" + result = result.strip() + else: + result = content message = ExecutorMessage( - content=content, + content=result, sender="function", tool_name=command, tool_input=arguments, diff --git a/agentverse/environments/tasksolving_env/rules/role_assigner/base.py b/agentverse/environments/tasksolving_env/rules/role_assigner/base.py index 726abf52a..cd8f843bf 100644 --- a/agentverse/environments/tasksolving_env/rules/role_assigner/base.py +++ b/agentverse/environments/tasksolving_env/rules/role_assigner/base.py @@ -19,7 +19,7 @@ class BaseRoleAssigner(BaseModel): """ @abstractmethod - def step( + async def astep( self, role_assigner: RoleAssignerAgent, group_members: List[CriticAgent], @@ -40,7 +40,7 @@ class DummyRoleAssigner(BaseRoleAssigner): The base class of role assignment class. """ - def step( + async def astep( self, role_assigner: RoleAssignerAgent, group_members: List[CriticAgent], diff --git a/agentverse/environments/tasksolving_env/rules/role_assigner/role_description.py b/agentverse/environments/tasksolving_env/rules/role_assigner/role_description.py index 1d7490c83..b28b64408 100644 --- a/agentverse/environments/tasksolving_env/rules/role_assigner/role_description.py +++ b/agentverse/environments/tasksolving_env/rules/role_assigner/role_description.py @@ -16,7 +16,7 @@ class DescriptionAssigner(BaseRoleAssigner): Generates descriptions for each agent. 
""" - def step( + async def astep( self, role_assigner: RoleAssignerAgent, group_members: List[CriticAgent], @@ -28,7 +28,7 @@ def step( assert task_description != "" assert len(group_members) > 0 - roles = role_assigner.step(advice, task_description, len(group_members)) + roles = await role_assigner.astep(advice, task_description, len(group_members)) if len(roles.content) < len(group_members): raise ValueError( f"Number of roles ({len(roles.content)}) and number of group members ({len(group_members)}) do not match." @@ -50,7 +50,7 @@ class DescriptionNameAssigner(BaseRoleAssigner): Generates description and name for each agent. """ - def step( + async def astep( self, role_assigner: RoleAssignerAgent, group_members: List[CriticAgent], @@ -63,7 +63,7 @@ def step( assert len(group_members) > 0 # roles: [{'name': 'xxx', 'description': 'xxx'}, ...] - roles = role_assigner.step(advice, task_description, len(group_members)) + roles = await role_assigner.astep(advice, task_description, len(group_members)) if len(group_members) < 2: pass diff --git a/agentverse/llms/__init__.py b/agentverse/llms/__init__.py index 5c0cd5047..0902d3116 100644 --- a/agentverse/llms/__init__.py +++ b/agentverse/llms/__init__.py @@ -1,6 +1,7 @@ from agentverse.registry import Registry llm_registry = Registry(name="LLMRegistry") +LOCAL_LLMS = ["llama-2-7b-chat-hf"] from .base import BaseLLM, BaseChatModel, BaseCompletionModel, LLMResult from .openai import OpenAIChat diff --git a/agentverse/llms/openai.py b/agentverse/llms/openai.py index 08547c300..1331d9155 100644 --- a/agentverse/llms/openai.py +++ b/agentverse/llms/openai.py @@ -13,7 +13,7 @@ from agentverse.logging import logger from agentverse.message import Message -from . import llm_registry +from . import llm_registry, LOCAL_LLMS from .base import BaseChatModel, BaseCompletionModel, BaseModelArgs from .utils.jsonrepair import JsonRepair @@ -92,8 +92,8 @@ class OpenAIChatArgs(BaseModelArgs): # total_tokens=response["usage"]["total_tokens"], # ) + # To support your own local LLMs, register it here and add it into LOCAL_LLMS. -LOCAL_LLMS = ['llama-2-7b-chat-hf'] @llm_registry.register("gpt-35-turbo") @llm_registry.register("gpt-3.5-turbo") @llm_registry.register("gpt-4") @@ -111,10 +111,23 @@ def __init__(self, max_retry: int = 3, **kwargs): args[k] = kwargs.pop(k, v) if len(kwargs) > 0: logging.warning(f"Unused arguments: {kwargs}") - if args['model'] in LOCAL_LLMS: + if args["model"] in LOCAL_LLMS: openai.api_base = "http://localhost:5000/v1" super().__init__(args=args, max_retry=max_retry) + @classmethod + def send_token_limit(self, model: str) -> int: + send_token_limit_dict = { + "gpt-3.5-turbo": 4096, + "gpt-35-turbo": 4096, + "gpt-3.5-turbo-16k": 16384, + "gpt-4": 8192, + "gpt-4-32k": 32768, + "llama-2-7b-chat-hf": 4096, + } + + return send_token_limit_dict[model] + # def _construct_messages(self, history: List[Message]): # return history + [{"role": "user", "content": query}] @retry( diff --git a/agentverse/llms/utils/__init__.py b/agentverse/llms/utils/__init__.py index e122a4906..d64f068ef 100644 --- a/agentverse/llms/utils/__init__.py +++ b/agentverse/llms/utils/__init__.py @@ -1 +1,2 @@ -from . 
jsonrepair import JsonRepair +from .jsonrepair import JsonRepair +from .token_counter import count_string_tokens, count_message_tokens diff --git a/agentverse/llms/utils/token_counter.py b/agentverse/llms/utils/token_counter.py new file mode 100644 index 000000000..cdff86ad9 --- /dev/null +++ b/agentverse/llms/utils/token_counter.py @@ -0,0 +1,59 @@ +# Modified from AutoGPT https://github.com/Significant-Gravitas/AutoGPT/blob/release-v0.4.7/autogpt/llm/utils/token_counter.py + +import tiktoken +from typing import List, Union, Dict +from agentverse.logging import logger +from agentverse.message import Message +from agentverse.llms import LOCAL_LLMS +from transformers import AutoTokenizer + + +def count_string_tokens(prompt: str = "", model: str = "gpt-3.5-turbo") -> int: + return len(tiktoken.encoding_for_model(model).encode(prompt)) + + +def count_message_tokens( + messages: Union[Dict, List[Dict]], model: str = "gpt-3.5-turbo" +) -> int: + if isinstance(messages, dict): + messages = [messages] + + if model.startswith("gpt-3.5-turbo"): + tokens_per_message = ( + 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n + ) + tokens_per_name = -1 # if there's a name, the role is omitted + encoding_model = "gpt-3.5-turbo" + elif model.startswith("gpt-4"): + tokens_per_message = 3 + tokens_per_name = 1 + encoding_model = "gpt-4" + elif model in LOCAL_LLMS: + encoding = AutoTokenizer.from_pretrained(model) + else: + raise NotImplementedError( + f"count_message_tokens() is not implemented for model {model}.\n" + " See https://github.com/openai/openai-python/blob/main/chatml.md for" + " information on how messages are converted to tokens." + ) + if model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4"): + try: + encoding = tiktoken.encoding_for_model(encoding_model) + except KeyError: + logger.warn("Warning: model not found. Using cl100k_base encoding.") + encoding = tiktoken.get_encoding("cl100k_base") + + num_tokens = 0 + for message in messages: + num_tokens += tokens_per_message + for key, value in message.items(): + # TODO: count number of function_call's token more accurately + if key == "function_call": + num_tokens += len(encoding.encode(value["name"])) + num_tokens += len(encoding.encode(value["arguments"])) + else: + num_tokens += len(encoding.encode(value)) + if key == "name": + num_tokens += tokens_per_name + num_tokens += 3 # every reply is primed with <|start|>assistant<|message|> + return num_tokens diff --git a/agentverse/memory/chat_history.py b/agentverse/memory/chat_history.py index 38649d402..e4bc508a6 100644 --- a/agentverse/memory/chat_history.py +++ b/agentverse/memory/chat_history.py @@ -1,17 +1,40 @@ import json -from typing import List +import logging +import os +import openai +import copy +from typing import List, Optional, Tuple, Dict from pydantic import Field from agentverse.message import Message, ExecutorMessage - from . import memory_registry from .base import BaseMemory +from agentverse.llms.utils import count_message_tokens, count_string_tokens +from agentverse.llms import OpenAIChat @memory_registry.register("chat_history") class ChatHistoryMemory(BaseMemory): messages: List[Message] = Field(default=[]) + has_summary: bool = False + max_summary_tlength: int = 500 + last_trimmed_index: int = 0 + summary: str = "" + SUMMARIZATION_PROMPT = '''Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information to remember. 
+ +You will receive the current summary and your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise. + +Summary So Far: +""" +{summary} +""" + +Latest Development: +""" +{new_events} +""" +''' def add_message(self, messages: List[Message]) -> None: for message in messages: @@ -30,8 +53,19 @@ def to_string(self, add_sender_prefix: bool = False) -> str: else: return "\n".join([message.content for message in self.messages]) - def to_messages(self, my_name: str = "", start_index: int = 0) -> List[dict]: + async def to_messages( + self, + my_name: str = "", + start_index: int = 0, + max_summary_length: int = 0, + max_send_token: int = 0, + model: str = "gpt-3.5-turbo", + ) -> List[dict]: messages = [] + + if self.has_summary: + start_index = self.last_trimmed_index + for message in self.messages[start_index:]: if message.sender == my_name: if isinstance(message, ExecutorMessage): @@ -71,7 +105,137 @@ def to_messages(self, my_name: str = "", start_index: int = 0) -> List[dict]: "content": f"[{message.sender}]: {message.content}", } ) + + # summary message + if self.has_summary: + """https://github.com/Significant-Gravitas/AutoGPT/blob/release-v0.4.7/autogpt/memory/message_history.py""" + if max_summary_length == 0: + max_summary_length = self.max_summary_tlength + max_send_token -= max_summary_length + prompt = [] + trimmed_history = add_history_upto_token_limit( + prompt, messages, max_send_token, model + ) + if trimmed_history: + new_summary_msg, _ = await self.trim_messages( + list(prompt), model, messages + ) + prompt.append(new_summary_msg) + messages = prompt return messages def reset(self) -> None: self.messages = [] + + async def trim_messages( + self, current_message_chain: List[Dict], model: str, history: List[Dict] + ) -> Tuple[Dict, List[Dict]]: + new_messages_not_in_chain = [ + msg for msg in history if msg not in current_message_chain + ] + + if not new_messages_not_in_chain: + return self.summary_message(), [] + + new_summary_message = await self.update_running_summary( + new_events=new_messages_not_in_chain, model=model + ) + + last_message = new_messages_not_in_chain[-1] + self.last_trimmed_index += history.index(last_message) + + return new_summary_message, new_messages_not_in_chain + + async def update_running_summary( + self, + new_events: List[Dict], + model: str = "gpt-3.5-turbo", + max_summary_length: Optional[int] = None, + ) -> dict: + if not new_events: + return self.summary_message() + if max_summary_length is None: + max_summary_length = self.max_summary_tlength + + new_events = copy.deepcopy(new_events) + + # Replace "assistant" with "you". This produces much better first person past tense results. 
+ # Iterate over a copy, since user messages are removed from the list below + for event in list(new_events): + if event["role"].lower() == "assistant": + event["role"] = "you" + + elif event["role"].lower() == "system": + event["role"] = "your computer" + + # Delete all user messages + elif event["role"] == "user": + new_events.remove(event) + + prompt_template_length = len( + self.SUMMARIZATION_PROMPT.format(summary="", new_events="") + ) + max_input_tokens = OpenAIChat.send_token_limit(model) - max_summary_length + summary_tlength = count_string_tokens(self.summary, model) + batch: List[Dict] = [] + batch_tlength = 0 + + for event in new_events: + event_tlength = count_message_tokens(event, model) + + if ( + batch_tlength + event_tlength + > max_input_tokens - prompt_template_length - summary_tlength + ): + await self._update_summary_with_batch(batch, model, max_summary_length) + summary_tlength = count_string_tokens(self.summary, model) + batch = [event] + batch_tlength = event_tlength + else: + batch.append(event) + batch_tlength += event_tlength + + if batch: + await self._update_summary_with_batch(batch, model, max_summary_length) + + return self.summary_message() + + async def _update_summary_with_batch( + self, new_events_batch: List[dict], model: str, max_summary_length: int + ) -> None: + prompt = self.SUMMARIZATION_PROMPT.format( + summary=self.summary, new_events=new_events_batch + ) + + response = await openai.ChatCompletion.acreate( + messages=[{"role": "user", "content": prompt}], + model=model, + max_tokens=max_summary_length, + temperature=0.5, + ) + self.summary = response["choices"][0]["message"]["content"] + + def summary_message(self) -> dict: + return { + "role": "system", + "content": f"This reminds you of these events from your past: \n{self.summary}", + } + + +def add_history_upto_token_limit( + prompt: List[dict], history: List[dict], t_limit: int, model: str +) -> List[Dict]: + limit_reached = False + current_prompt_length = 0 + trimmed_messages: List[Dict] = [] + for message in history[::-1]: + token_to_add = count_message_tokens(message, model) + if current_prompt_length + token_to_add > t_limit: + limit_reached = True + + if not limit_reached: + prompt.insert(0, message) + current_prompt_length += token_to_add + else: + trimmed_messages.insert(0, message) + return trimmed_messages diff --git a/agentverse/tasks/tasksolving/tool_using/24point/config.yaml b/agentverse/tasks/tasksolving/tool_using/24point/config.yaml index 2ac61f385..e26ecd6a3 100644 --- a/agentverse/tasks/tasksolving/tool_using/24point/config.yaml +++ b/agentverse/tasks/tasksolving/tool_using/24point/config.yaml @@ -147,6 +147,7 @@ agents: append_prompt_template: *role_assigner_append_prompt memory: memory_type: chat_history + has_summary: true llm: llm_type: gpt-4 model: "gpt-4" @@ -163,6 +164,7 @@ agents: max_retry: 100 memory: memory_type: chat_history + has_summary: true llm: llm_type: gpt-4 model: "gpt-4" @@ -186,6 +188,7 @@ agents: append_prompt_template: *critic_append_prompt memory: memory_type: chat_history + has_summary: true llm: llm_type: gpt-4 model: "gpt-4" @@ -204,6 +207,7 @@ agents: max_retry: 100 memory: memory_type: chat_history + has_summary: true llm: llm_type: gpt-4 model: gpt-4 @@ -222,6 +226,7 @@ agents: append_prompt_template: *evaluator_append_prompt memory: memory_type: chat_history + has_summary: true llm: llm_type: gpt-4 model: gpt-4 @@ -237,6 +242,7 @@ agents: prompt_template: *manager_prompt memory: memory_type: chat_history + has_summary: true llm: llm_type: gpt-4 model: "gpt-4" diff --git a/agentverse/tasks/tasksolving/tool_using/tools_simplified.json 
b/agentverse/tasks/tasksolving/tool_using/tools_simplified.json index 9ddfeffa2..609926c7f 100644 --- a/agentverse/tasks/tasksolving/tool_using/tools_simplified.json +++ b/agentverse/tasks/tasksolving/tool_using/tools_simplified.json @@ -1,35 +1,12 @@ { "available_envs": [ { - "name": "FileSystemEnv", - "description": "Provide a file system operation environment for Agent.\n ", - "total_tools": 5, - "tools": [ - "is_path_exist", - "modify_file", - "print_filesys_struture", - "read_from_file", - "write_to_file" - ] - }, - { - "name": "PDBCodingEnv", - "description": "Note: This env is subclass of ['FileSystemEnv', 'ShellEnv'], and all tools of parent envs are inherited and not visible. You can try call parent tools or check this env's defination to show them.\nPython Debugging Coding Environment.\n Always run code with `python -m pdb {python_file.py}`.\n ", - "total_tools": 11, - "tools": [ - "run_code" - ] - }, - { - "name": "ShellEnv", - "description": "Provide and maintain an interactive shell environment.\n ", - "total_tools": 5, + "name": "PythonNotebook", + "description": "Python Notebook Environment. Provide a notebook interface to run python code.", + "total_tools": 2, "tools": [ - "kill", - "read_stdout", - "restart", - "terminate", - "write_stdin" + "execute_cell", + "print_cells_outputs" ] }, { @@ -38,105 +15,217 @@ "total_tools": 2, "tools": [ "browse_website", - "scrape_text" + "search_and_browse" ] }, { - "name": "RapidAPIEnv", - "description": "Note: All tools of this env are invisible during all tools display, please check this env's defination to show all tools.\nRapidAPI Env delivers rapid api for tool server.", - "total_tools": 4208, + "name": "FileSystemEnv", + "description": "Provide a file system operation environment for Agent.\n ", + "total_tools": 4, "tools": [ - "rapi_100_success_instagram_api_scalable_robust_media_info", - "rapi_100_success_instagram_api_scalable_robust_post_comments", - "rapi_100_success_instagram_api_scalable_robust_user_followers", - "rapi_13f918yf19o1t1f1of1t9_endpoint1", - "rapi_3d_services_thumbnail", - "rapi_4d_dream_dictionary_get_dream_number", - "rapi_50k_radio_stations_get_channels", - "rapi_50k_radio_stations_get_cities", - "rapi_50k_radio_stations_get_countries", - "rapi_50k_radio_stations_get_genres", - "..." + "modify_file", + "print_filesys_struture", + "read_from_file", + "write_to_file" ] } ], "available_tools": [ - "FileSystemEnv_is_path_exist", + "PythonNotebook_execute_cell", + "PythonNotebook_print_cells_outputs", + "WebEnv_browse_website", + "WebEnv_search_and_browse", "FileSystemEnv_modify_file", "FileSystemEnv_print_filesys_struture", "FileSystemEnv_read_from_file", "FileSystemEnv_write_to_file", - "PDBCodingEnv_run_code", - "ShellEnv_kill", - "ShellEnv_read_stdout", - "ShellEnv_restart", - "ShellEnv_terminate", - "ShellEnv_write_stdin", - "WebEnv_browse_website", - "WebEnv_scrape_text", - "query_wolfram", - "bing_search" + "shell_command_executor" ], "tools_json": [ - { - "name": "WebEnv_browse_website", - "description": "Browse a website and return the page. Note some websites may not be accessable due to network error.", - "parameters": { - "type": "object", - "properties": { - "url": { - "type": "string", - "description": "The realworld URL to scrape text from. Started with http:// or https://." - }, - "question": { - "type": "string", - "description": "The question for the website. The function will answer the question by browsing the url." 
- } - }, - "required": [ - "url", - "question" - ] - } - }, { - "name": "run_interpreter", - "description": "The code interpreter tool that runs code and returns the output.\nThe `code` will be written to file `filename` and the `command` will be executed in a shell. The returned value of this tool is the stdout content. So use `print` to get the important information.\nExample:\n```\nrun_interpreter(code='print(\"hello world\")',command='python code.py')\n```", + "name": "PythonNotebook_execute_cell", + "description": "Create or replace a notebook cell and execute it, return the output.\nUse this tool to test your idea quickly.\n\nExample:\n```\nIn[0]: code='print(\"hello world\")' # This will create a new cell and execute it.\nOut[0]: ['cell_index: 0', 'hello world']\nIn[1]: code='print(\"hello world\")',cell_index=0 # This will overwrite the first cell and execute it.\nIn[2]: code='print(\"hello world\")',cell_index=-1 # This will overwrite the last cell and execute it.\n```", "parameters": { "type": "object", "properties": { "code": { "type": "string", - "description": "The code to be written, default to `None`, which means no code will be written to file." + "description": "python code to be executed, make sure it is valid python code with right format. don't provide shell commands that start with '!' here. don't make your code print too much output." + }, + "cell_index": { + "type": "integer", + "description": "the index of the cell in which to insert and overwrite `code`, default to `None`, which means append a new cell." }, - "filename": { + "reset": { + "type": "boolean", + "description": "whether to reset the kernel before executing the code. Default to `False`." + } + }, + "required": [ + "code" + ] + } + }, + { + "name": "PythonNotebook_print_cells_outputs", + "description": "Print all notebook cells' content and output.", + "parameters": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "WebEnv_browse_website", + "description": "Give an http or https url to browse a website and return the summarized text. Note some websites may not be accessible due to network error. This tool only returns the content of the given url and cannot provide any information that needs interaction with the website.", + "parameters": { + "type": "object", + "properties": { + "url": { "type": "string", - "description": "The filename to be written in mode `w`, default to `code.py`." + "description": "The real-world Uniform Resource Locator (web address) to scrape text from. Never provide something like \"\", give a real url!!! Example: 'https://www.deepmind.com/'" }, - "command": { + "goals_to_browse": { "type": "string", - "description": "The shell command to be executed should avoid requiring additional user input, default to `python {filename}`." + "description": "The goals for browsing the given `url` (e.g. what you want to find on the webpage). If you need more details, request them here." } }, - "required": [] + "required": [ + "url", + "goals_to_browse" + ] } }, { - "name": "bing_search", - "description": "Return 3 most relevant results of a Bing search using the official Bing API. This tool does not provide website details, use other tools to browse website if you need.", + "name": "WebEnv_search_and_browse", + "description": "Search with search tools and browse the website returned by search. Note some websites may not be accessible due to network error.", "parameters": { "type": "object", "properties": { - "query": { + "search_query": { "type": "string", "description": "The search query." + }, + "goals_to_browse": { + "type": "string", + "description": "What you want to find on the website returned by search. If you need more details, request them here. Examples: 'What is latest news about deepmind?', 'What is the main idea of this article?'" + }, + "region": { + "type": "string", + "description": "The region code of the search, default to `en-US`. Available regions: `en-US`, `zh-CN`, `ja-JP`, `de-DE`, `fr-FR`, `en-GB`." } }, "required": [ - "query" + "search_query", + "goals_to_browse" ] } }, + { + "name": "FileSystemEnv_modify_file", + "description": "Modify the textual file lines in slice [start_index:end_index] based on `new_content` provided. Return content of the file after modification so no further need to call `read_from_file`.\nfilepath_content_lines[start_index:end_index] = new_content\n\nExample:\n```\nIn[0]: modify_file('test.txt', 'Hello World!') # This will insert a new line `Hello World!` at the end of the file `test.txt`.\nIn[1]: modify_file('test.txt', 'Hello World!', 0) # This will insert a new line `Hello World!` at the beginning of the file `test.txt`.\nIn[2]: modify_file('test.txt', 'Hello World!', 0, 1) # This will replace the first line of the file `test.txt` with `Hello World!`. \n```", + "parameters": { + "type": "object", + "properties": { + "filepath": { + "type": "string", + "description": "The path to the file to be modified, always use relative path to the workspace root." + }, + "new_content": { + "type": "string", + "description": "The new content to be replaced with the old content." + }, + "start_index": { + "type": "integer", + "description": "The start position of the slice of file lines to modify. Defaults to `None`, which means insert the new content at the end of the file. So do not provide this if you want to append the new content to the file." + }, + "end_index": { + "type": "integer", + "description": "The end position of the slice of file lines to modify. Defaults to the value of `start_index`, which means if `start_index` is provided, insert the new content at the `start_index` line." + } + }, + "required": [ + "filepath", + "new_content" + ] + } + }, + { + "name": "FileSystemEnv_print_filesys_struture", + "description": "Return a tree-like structure for all files and folders in the workspace. Use this tool if you are not sure what files are in the workspace.\nThis function recursively walks through all the directories in the workspace\nand returns them in a tree-like structure, \ndisplaying all the files under each directory.\n\nExample:\n```\n- root/\n - sub_directory1/\n - file1.txt\n - file2.txt\n - sub_directory2/\n - file3.txt\n```", + "parameters": { + "type": "object", + "properties": {}, + "required": [] + } + }, + { + "name": "FileSystemEnv_read_from_file", + "description": "Open and read the textual file content in the workspace, you will see the content of the target file.\nDon't use this if the given `filepath` was written or modified before, the content in `filepath` should already be returned.", + "parameters": { + "type": "object", + "properties": { + "filepath": { + "type": "string", + "description": "The path to the file to be opened, always use relative path to the workspace root." + }, + "start_index": { + "type": "integer", + "description": "The starting line number of the content to be opened. Defaults to 0." + } + }, + "required": [ + "filepath" + ] + } + }, + { + "name": "FileSystemEnv_write_to_file", + "description": "Write the textual file in the workspace with the content provided. \nWill automatically create the file if it does not exist. Also overwrites the file content if it already exists. If you want to append content to the file, use `modify_file` instead.\nBetter check if the file exists before directly writing to it. \nReturn content of the file after writing.", + "parameters": { + "type": "object", + "properties": { + "filepath": { + "type": "string", + "description": "The path to the file to be saved, always use relative path to the workspace root." + }, + "content": { + "type": "string", + "description": "The content to be saved." + } + }, + "required": [ + "filepath", + "content" + ] + } + }, + { + "name": "shell_command_executor", + "description": "The shell tool that executes shell commands with root privilege and returns the output and error. \nYou can use this tool to install packages, download files, run programs, etc.\nSet run_async=True to run the command in a new thread and return instantly if your command is time-costly, like installing packages or hosting services. \nExample:\n```\nIn: shell_command_executor(command='echo \"hello world\"')\nOut: \"hello world\"\nIn: shell_command_executor(command='sleep 10', run_async=True)\nOut: {'shell_id': 0} # You can use this id to read the output and error later.\nIn: shell_command_executor(shell_id=0, kill=True)\nOut: \"\" # The shell 0 will be killed.\n```", "parameters": { "type": "object", "properties": { "command": { "type": "string", "description": "The shell command to be executed, must avoid commands requiring additional user input. Default is empty string." }, "run_async": { "type": "boolean", "description": "Whether to run the command asynchronously, default is False. If True, call this tool again with shell_id to get the final output and error." }, "shell_id": { "type": "integer", "description": "The id of the shell to execute the command in, default is None, which means running in a new shell. Change this to execute commands in the same shell." }, "kill": { "type": "boolean", "description": "If True, kill the shell which runs the command after execution. Default is False. Don't use any other kill command!" } }, "required": [] } }, { "name": "submit_task", "description": "Submit your conclusion to the task", diff --git a/agentverse_command/main_tasksolving_cli.py b/agentverse_command/main_tasksolving_cli.py index 2382adf90..8e66fc9ae 100644 --- a/agentverse_command/main_tasksolving_cli.py +++ b/agentverse_command/main_tasksolving_cli.py @@ -3,7 +3,8 @@ # from agentverse.agentverse import AgentVerse from agentverse.tasksolving import TaskSolving -from agentverse.gui import GUI + +# from agentverse.gui import GUI from agentverse.logging import logger from argparse import ArgumentParser diff --git a/requirements.txt b/requirements.txt index e93df8d9f..adaa4834c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,3 +18,4 @@ rapidfuzz spacy colorama==0.4.6 fschat[model_worker,webui] +tiktoken==0.5.1
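Taken together, the memory changes in this patch implement one budgeting rule: count the tokens of the fixed prompt parts, subtract them from the model's send limit, and fit (or summarize) the chat history inside the remainder. The sketch below illustrates that flow in isolation; it is not code from this diff. `SEND_TOKEN_LIMITS`, `fit_history`, and the flat 4-token per-message overhead are simplified, assumed stand-ins for `OpenAIChat.send_token_limit`, `ChatHistoryMemory.to_messages`, and `count_message_tokens`.

```python
# Minimal sketch of the token-budgeting flow introduced above, assuming
# only tiktoken. Helper names here are illustrative, not from the repo.
from typing import Dict, List

import tiktoken

# Assumed subset of the send_token_limit_dict table in openai.py.
SEND_TOKEN_LIMITS = {"gpt-3.5-turbo": 4096, "gpt-4": 8192}


def count_string_tokens(text: str, model: str = "gpt-3.5-turbo") -> int:
    return len(tiktoken.encoding_for_model(model).encode(text))


def fit_history(
    prepend_prompt: str,
    append_prompt: str,
    history: List[Dict[str, str]],
    model: str = "gpt-3.5-turbo",
    reply_reserve: int = 1000,  # assumed head-room for the model's reply
) -> List[Dict[str, str]]:
    """Keep the newest history messages that fit the remaining token budget."""
    budget = (
        SEND_TOKEN_LIMITS[model]
        - count_string_tokens(prepend_prompt, model)
        - count_string_tokens(append_prompt, model)
        - reply_reserve
    )
    kept: List[Dict[str, str]] = []
    used = 0
    # Walk newest-to-oldest, the way add_history_upto_token_limit does.
    for message in reversed(history):
        # +4 approximates the per-message chat overhead that
        # count_message_tokens charges for gpt-3.5-style models.
        cost = count_string_tokens(message["content"], model) + 4
        if used + cost > budget:
            break  # older messages fall outside the budget
        kept.insert(0, message)
        used += cost
    return kept


if __name__ == "__main__":
    history = [
        {"role": "user", "content": f"observation {i}: " + "data " * 40}
        for i in range(200)
    ]
    trimmed = fit_history("You are a critic agent.", "Now respond.", history)
    print(f"kept {len(trimmed)} of {len(history)} messages")
```

In the patch itself the overflow is not silently dropped: `add_history_upto_token_limit` returns the trimmed messages, `trim_messages` folds them into a running summary via `update_running_summary`, and that summary re-enters the prompt as a single system message capped at `max_summary_tlength` tokens.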