diff --git a/flaml/autogen/agent/human_proxy_agent.py b/flaml/autogen/agent/human_proxy_agent.py
new file mode 100644
index 0000000000..70cd285e4c
--- /dev/null
+++ b/flaml/autogen/agent/human_proxy_agent.py
@@ -0,0 +1,117 @@
+from .agent import Agent
+from flaml.autogen.code_utils import extract_code, execute_code
+from collections import defaultdict
+
+
+class HumanProxyAgent(Agent):
+    """(Experimental) A proxy agent for human, that can execute code and provide feedback to the other agents."""
+
+    MAX_CONSECUTIVE_AUTO_REPLY = 100  # maximum number of consecutive auto replies (subject to future change)
+
+    def __init__(
+        self,
+        name,
+        system_message="",
+        work_dir=None,
+        human_input_mode="ALWAYS",
+        max_consecutive_auto_reply=None,
+        is_termination_msg=None,
+        **config,
+    ):
+        """
+        Args:
+            name (str): name of the agent
+            system_message (str): system message to be sent to the agent
+            work_dir (str): working directory for the agent
+            human_input_mode (str): whether to ask for human inputs every time a message is received.
+                Possible values are "ALWAYS", "TERMINATE", "NEVER".
+                (1) When "ALWAYS", the agent prompts for human input every time a message is received.
+                    Under this mode, the conversation stops when the human input is "exit",
+                    or when is_termination_msg is True and there is no human input.
+                (2) When "TERMINATE", the agent prompts for human input only when a termination message is received or
+                    the number of auto reply reaches the max_consecutive_auto_reply.
+                (3) When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops
+                    when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True.
+            max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
+                default: None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).
+                The limit only plays a role when human_input_mode is not "ALWAYS".
+            is_termination_msg (function): a function that takes a message and returns a boolean value.
+                This function is used to determine if a received message is a termination message.
+            config (dict): other configurations.
+
+        """
+        super().__init__(name, system_message)
+        self._work_dir = work_dir
+        self._human_input_mode = human_input_mode
+        self._is_termination_msg = (
+            is_termination_msg if is_termination_msg is not None else (lambda x: x == "TERMINATE")
+        )
+        self._config = config
+        self._max_consecutive_auto_reply = (
+            max_consecutive_auto_reply if max_consecutive_auto_reply is not None else self.MAX_CONSECUTIVE_AUTO_REPLY
+        )
+        self._consecutive_auto_reply_counter = defaultdict(int)
+
+    def _execute_code(self, code, lang):
+        """Execute the code and return the result."""
+        if lang == "bash":
+            assert code.startswith("python "), code
+            file_name = code[len("python ") :]
+            exitcode, logs = execute_code(filename=file_name, work_dir=self._work_dir)
+        elif lang == "python":
+            if code.startswith("# filename: "):
+                filename = code[len("# filename: ") : code.find("\n")].strip()
+            else:
+                filename = None
+            exitcode, logs = execute_code(code, work_dir=self._work_dir, filename=filename)
+        else:
+            # TODO: could this happen?
+            exitcode, logs = 1, "unknown language"
+            # raise NotImplementedError
+        return exitcode, logs
+
+    def auto_reply(self, message, sender, default_reply=""):
+        """Generate an auto reply."""
+        code, lang = extract_code(message)
+        if lang == "unknown":
+            # no code block is found, lang should be "unknown"
+            self._send(default_reply, sender)
+        else:
+            # try to execute the code
+            exitcode, logs = self._execute_code(code, lang)
+            exitcode2str = "execution succeeded" if exitcode == 0 else "execution failed"
+            self._send(f"exitcode: {exitcode} ({exitcode2str})\nCode output: {logs.decode('utf-8')}", sender)
+
+    def receive(self, message, sender):
+        """Receive a message from the sender agent.
+        Once a message is received, this function sends a reply to the sender or simply stops.
+        The reply can be generated automatically or entered manually by a human.
+        """
+        super().receive(message, sender)
+        # default reply is empty (i.e., no reply, in this case we will try to generate auto reply)
+        reply = ""
+        if self._human_input_mode == "ALWAYS":
+            reply = input(
+                "Provide feedback to the sender. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: "
+            )
+        elif self._consecutive_auto_reply_counter[
+            sender.name
+        ] >= self._max_consecutive_auto_reply or self._is_termination_msg(message):
+            if self._human_input_mode == "TERMINATE":
+                reply = input(
+                    "Please give feedback to the sender. (Press enter or type 'exit' to stop the conversation): "
+                )
+                reply = reply if reply else "exit"
+            else:
+                # this corresponds to the case when self._human_input_mode == "NEVER"
+                reply = "exit"
+        if reply == "exit" or (self._is_termination_msg(message) and not reply):
+            return
+        elif reply:
+            # reset the consecutive_auto_reply_counter
+            self._consecutive_auto_reply_counter[sender.name] = 0
+            self._send(reply, sender)
+            return
+
+        self._consecutive_auto_reply_counter[sender.name] += 1
+        self.auto_reply(message, sender, default_reply=reply)
diff --git a/flaml/autogen/agent/meta_agent.py b/flaml/autogen/agent/meta_agent.py
new file mode 100644
index 0000000000..bbd0825332
--- /dev/null
+++ b/flaml/autogen/agent/meta_agent.py
@@ -0,0 +1,105 @@
+from collections import defaultdict
+from typing import Optional
+from flaml.autogen.agent.agent import Agent
+from flaml import oai
+from flaml.autogen.code_utils import DEFAULT_MODEL, FAST_MODEL
+
+
+class MetaAgent(Agent):
+    """(Experimental) A meta agent that can wrap other agents and perform actions based on the messages received."""
+
+    DEFAULT_CONFIG = {
+        "model": DEFAULT_MODEL,
+    }
+
+    DEFAULT_SYSTEM_MESSAGE = """
+    Assistant has just had the below interactions with a User. Assistant followed their "Instructions" closely.
+    Your job is to critique the Assistant's performance and then revise the Instructions so that Assistant would
+    quickly and correctly respond in the future.
+
+    ####
+
+    {chat_history}
+
+    ####
+
+    Please reflect on these interactions.
+
+    You should first critique Assistant's performance. What could Assistant have done better?
+    What should the Assistant remember about this user? Are there things this user always wants?
+    Indicate this with "Critique: ...".
+
+    You should next revise the Instructions so that Assistant would quickly and correctly respond in the future.
+    Assistant's goal is to satisfy the user in as few interactions as possible. Assistant will only see the new Instructions, not the interaction history, so anything important must be summarized in the Instructions. Don't forget any important details in the current Instructions! Indicate the new Instructions by "Instructions: ...".
+    """
+
+    def __init__(
+        self,
+        name,
+        system_message="",
+        agent: Optional[Agent] = None,
+        dev_agent: Optional[Agent] = None,
+    ):
+        """
+        Args:
+            name (str): name of the meta agent
+            system_message (str): system message to be sent to the meta agent
+            agent (optional): the agent to be wrapped
+            dev_agent (optional): the agent to be wrapped for development
+        """
+        # use super() to call the parent class's __init__ method
+        super().__init__(name, system_message=system_message)
+        self._system_message = system_message if system_message else self.DEFAULT_SYSTEM_MESSAGE
+        # TODO: do we only need to have only user_agent or dev_agent?
+        self._agent = agent
+        self._dev_agent = dev_agent
+        self._meta_prompt_template = """
+        Assistant has just had the below interactions with a User. Assistant followed their "Instructions" closely.
+        Your job is to critique the Assistant's performance and then revise the Instructions so that Assistant would
+        quickly and correctly respond in the future.
+
+        ####
+
+        {chat_history}
+
+        ####
+
+        Please reflect on these interactions.
+
+        You should first reflect on Assistant's performance. What could Assistant have done better?
+        What should the Assistant remember about this user? Are there things this user always wants?
+        Indicate this with "Reflection: ...".
+
+        You should next revise the Instructions so that Assistant would quickly and correctly respond in the future.
+        Assistant's goal is to satisfy the user in as few interactions as possible. Assistant will only see the new Instructions, not the interaction history, so anything important must be summarized in the Instructions.
+        Indicate the new Instructions by "Instructions: ...".
+        """
+
+    def _receive(self, message, sender):
+        """Receive a message from another agent."""
+        if self._agent:
+            self._agent.receive(message, sender)
+        # if self._dev_agent:
+        #     self._dev_agent.receive(message, sender)
+
+    def _get_chat_history(self):
+        """Get the chat history of the agent."""
+        chat_history = ""
+        for conversation in self._agent._conversations.values():
+            for message in conversation:
+                if message["role"] == "user":
+                    chat_history += "User: " + message["content"] + "\n"
+                else:
+                    chat_history += "Assistant: " + message["content"] + "\n"
+        return chat_history
+
+    def reflect(self):
+        self.receive("reflect", self._dev_agent)
+        # """Reflect on the conversations with the agents."""
+        # chat_history = self._get_chat_history()
+        # meta_prompt = self._meta_prompt_template.format(chat_history=chat_history)
+        # responses = oai.ChatCompletion.create(messages=[{"content": meta_prompt, "role": "user"}], **self._config)
+        # response = oai.ChatCompletion.extract_text(responses)[0]
+        # print(f"Reflecting.....\n{self._name}", response)
+        # self._agent = self._agent_class(self._name, meta_prompt=response)
+        # TODO: maybe we should also consider adding the instruction as the init prompt
diff --git a/test/autogen/test_human_proxy_agent.py b/test/autogen/test_human_proxy_agent.py
new file mode 100644
index 0000000000..335552f8e6
--- /dev/null
+++ b/test/autogen/test_human_proxy_agent.py
@@ -0,0 +1,39 @@
+from flaml import oai
+
+
+def test_human_agent():
+    try:
+        import openai
+    except ImportError:
+        return
+    from flaml.autogen.agent.chat_agent import ChatAgent
+    from flaml.autogen.agent.human_proxy_agent import HumanProxyAgent
+
+    conversations = {}
+    oai.ChatCompletion.start_logging(conversations)
+    agent = ChatAgent("chat_agent")
+    user = HumanProxyAgent("human_user", human_input_mode="NEVER", max_consecutive_auto_reply=2)
+    agent.receive(
+        """Write python code to solve the equation x^3=125. You must write code in the following format. You must always print the result.
+        Wait for me to return the result.
+        ```python
+        # your code
+        print(your_result)
+        ```
+        """,
+        user,
+    )
+    print(conversations)
+
+
+if __name__ == "__main__":
+    import openai
+
+    openai.api_key_path = "test/openai/key.txt"
+    # if you use Azure OpenAI, comment the above line and uncomment the following lines
+    # openai.api_type = "azure"
+    # openai.api_base = "https://.openai.azure.com/"
+    # openai.api_version = "2023-03-15-preview"  # change if necessary
+    # openai.api_key = ""
+    # test_extract_code()
+    test_human_agent()
diff --git a/test/autogen/test_meta_agent.py b/test/autogen/test_meta_agent.py
new file mode 100644
index 0000000000..15b210af0b
--- /dev/null
+++ b/test/autogen/test_meta_agent.py
@@ -0,0 +1,42 @@
+from flaml.autogen.agent.coding_agent import PythonAgent
+from flaml.autogen.agent.user_proxy_agent import UserProxyAgent
+from flaml.autogen.agent.meta_agent import MetaAgent
+
+
+def test_meta_prompt():
+    tasks = [
+        "Create and execute a script to plot a rocket without using matplotlib.",
+        "Create and execute a script to plot a helicopter without using matplotlib",
+    ]
+
+    ## User mode:
+    python_agent = PythonAgent("python agent")
+    assistant = MetaAgent(name="meta agent", agent=python_agent)
+    user = UserProxyAgent("human user", work_dir="test/autogen", human_input_mode="ALWAYS")
+    for i, task in enumerate(tasks):
+        print(f".........Starting the {i+1}-th task!.........")
+        assistant.receive(task, user)
+        print(f".........{i+1}-th task finished!.........")
+
+    ## Dev mode:
+    dev = UserProxyAgent("expert", work_dir="test/autogen", human_input_mode="ALWAYS")
+    assistant = MetaAgent(name="meta agent", agent=python_agent, dev_agent=dev)
+    user = UserProxyAgent("human user", work_dir="test/autogen", human_input_mode="ALWAYS")
+    for i, task in enumerate(tasks[0:2]):
+        assistant.receive(task, user)
+        assistant.reflect()
+
+    ### Can also be used in the following way:
+    for i, task in enumerate(tasks):
+        print(f".........Starting the {i+1}-th task!.........")
+        assistant.receive(task, dev)
+        assistant.reflect()
+        print(f".........{i+1}-th task finished!.........")
+
+
+if __name__ == "__main__":
+    import openai
+
+    openai.api_key_path = "test/openai/key.txt"
+    # openai.api_key_path = "test/openai/key_gpt3.txt"
+    test_meta_prompt()