Feedback for prepared transactions #173

Merged: 8 commits (Apr 11, 2024)
169 changes: 104 additions & 65 deletions autotx/AutoTx.py
@@ -17,6 +17,10 @@
class Config:
    verbose: bool

@dataclass
class PastRun:
    feedback: str
    transactions_info: str

class AutoTx:
    manager: SafeManager
    config: Config = Config(verbose=False)
@@ -37,82 +41,117 @@ def __init__(
        self.agents = agents

    def run(self, prompt: str, non_interactive: bool, silent: bool = False) -> None:
        original_prompt = prompt
        past_runs: list[PastRun] = []

        while True:
            if past_runs:
                self.transactions.clear()

                prev_history = "".join(
                    [
                        dedent(f"""
                            Then you prepared these transactions to accomplish the goal:
                            {run.transactions_info}
                            Then the user provided feedback:
                            {run.feedback}
                        """)
                        for run in past_runs
                    ]
                )

                prompt = (f"\nOriginaly the user said: {original_prompt}"
                    + prev_history
                    + "Pay close attention to the user's feedback and try again.\n")

            print("Running AutoTx with the following prompt: ", prompt)

            user_proxy = UserProxyAgent(
                name="user_proxy",
                is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),
                human_input_mode="NEVER",
                max_consecutive_auto_reply=20,
                system_message=f"You are a user proxy. You will be interacting with the agents to accomplish the tasks.",
                llm_config=self.get_llm_config(),
                code_execution_config=False,
            )

            agents_information = self.get_agents_information(self.agents)

            goal = build_goal(prompt, agents_information, self.manager.address, non_interactive)

            verifier_agent = AssistantAgent(
                name="verifier",
                is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),
                system_message=dedent(
                    """
                    Verifier is an expert in verifiying if user goals are met.
                    Verifier analyzes chat and responds with TERMINATE if the goal is met.
                    Verifier can consider the goal met if the other agents have prepared the necessary transactions.

                    If some information needs to be returned to the user or if there are any errors encountered during the process, add this in your answer.
                    Start any error messages with "ERROR:" to clearly indicate the issue. Then say the word TERMINATE.
                    Make sure to only add information if the user explicitly asks for a question that needs to be answered
                    or error details if user's request can not be completed.
                    """
                ),
                llm_config=self.get_llm_config(),
                human_input_mode="NEVER",
                code_execution_config=False,
            )

            autogen_agents = [agent.build_autogen_agent(self, user_proxy, self.get_llm_config()) for agent in self.agents]

            groupchat = GroupChat(
                agents=autogen_agents + [user_proxy, verifier_agent],
                messages=[],
                max_round=20,
                select_speaker_prompt_template = (
                    """
                    Read the above conversation. Then select the next role from {agentlist} to play. Only return the role and NOTHING else.
                    """
                )
            )
            manager = GroupChatManager(groupchat=groupchat, llm_config=self.get_llm_config())

            if silent:
                IOStream.set_global_default(IOSilent())
            else:
                IOStream.set_global_default(IOConsole())

            chat = user_proxy.initiate_chat(manager, message=dedent(
                f"""
                My goal is: {prompt}
                Advisor reworded: {goal}
                """
            ))

            if "ERROR:" in chat.summary:
                error_message = chat.summary.replace("ERROR: ", "").replace("\n", "")
                cprint(error_message, "red")
            else:
                cprint(chat.summary.replace("\n", ""), "green")

            try:
                result = self.manager.send_tx_batch(self.transactions, require_approval=not non_interactive)

                if isinstance(result, str):
                    transactions_info = "\n".join(
                        [
                            f"{i + 1}. {tx.summary}"
                            for i, tx in enumerate(self.transactions)
                        ]
                    )

                    past_runs.append(PastRun(result, transactions_info))
                else:
                    break

            except Exception as e:
                cprint(e, "red")
                break

        self.transactions.clear()


    def get_agents_information(self, agents: list[AutoTxAgent]) -> str:
        agent_descriptions = []
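To make the retry flow above easier to follow, here is a minimal, self-contained sketch of how user feedback is folded back into the next prompt. The helper name build_retry_prompt and the sample values are illustrative only; in the PR this logic lives inline in AutoTx.run.

# Minimal sketch of the feedback-to-prompt step in AutoTx.run (illustrative only).
from dataclasses import dataclass
from textwrap import dedent

@dataclass
class PastRun:
    feedback: str
    transactions_info: str

def build_retry_prompt(original_prompt: str, past_runs: list[PastRun]) -> str:
    # Fold every previous attempt and the user's feedback on it back into the prompt,
    # mirroring the prev_history string built inside run().
    history = "".join(
        dedent(f"""
            Then you prepared these transactions to accomplish the goal:
            {run.transactions_info}
            Then the user provided feedback:
            {run.feedback}
        """)
        for run in past_runs
    )
    return (
        f"\nOriginally the user said: {original_prompt}"
        + history
        + "Pay close attention to the user's feedback and try again.\n"
    )

# Example: after one rejected batch, the next prompt carries the feedback.
runs = [PastRun("Swap only half of the ETH", "1. Swap 1.0 ETH for 3000 DAI")]
print(build_retry_prompt("Buy DAI with my ETH", runs))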
4 changes: 2 additions & 2 deletions autotx/build_check.py
@@ -6,6 +6,6 @@ def run() -> None:
result = subprocess.run(["mypy", "."], capture_output=True)
print(result.stdout.decode())
if result.returncode != 0:
print("Type checking failed.")
print(result.stderr.decode())
sys.exit(1)
print("Type checking failed")
sys.exit(1)
36 changes: 29 additions & 7 deletions autotx/utils/ethereum/SafeManager.py
@@ -238,7 +238,7 @@ def send_tx(self, tx: TxParams, safe_nonce: Optional[int] = None) -> str | None:
        hash = self.execute_tx(tx, safe_nonce)
        return hash.hex()

-    def send_tx_batch(self, txs: list[PreparedTx], require_approval: bool, safe_nonce: Optional[int] = None) -> bool: # Returns true if successful
+    def send_tx_batch(self, txs: list[PreparedTx], require_approval: bool, safe_nonce: Optional[int] = None) -> bool | str: # True if sent, False if declined, str if feedback
        print("=" * 50)

        if not txs:
@@ -254,15 +254,23 @@
            ]
        )

-        print(f"Batched transactions:\n{transactions_info}")
+        print(f"Prepared transactions:\n{transactions_info}")

        if self.use_tx_service:
            if require_approval:
-                response = input("Do you want the above transactions to be sent to your smart account? (y/n): ")
+                response = input("Do you want the above transactions to be sent to your smart account?\nRespond (y/n) or write feedback: ")

-                if response.lower() != "y":
+                if response.lower() == "n" or response.lower() == "no":
                    print("Transactions not sent to your smart account (declined).")
+
+                    self.reset_nonce(start_nonce)
+
                    return False
+                elif response.lower() != "y" and response.lower() != "yes":
+
+                    self.reset_nonce(start_nonce)
+
+                    return response
            else:
                print("Non-interactive mode enabled. Transactions will be sent to your smart account without approval.")
@@ -276,11 +284,19 @@
            return True
        else:
            if require_approval:
-                response = input("Do you want to execute the above transactions? (y/n): ")
+                response = input("Do you want to execute the above transactions?\nRespond (y/n) or write feedback: ")

-                if response.lower() != "y":
+                if response.lower() == "n" or response.lower() == "no":
                    print("Transactions not executed (declined).")
+
+                    self.reset_nonce(start_nonce)
+
                    return False
+                elif response.lower() != "y" and response.lower() != "yes":
+
+                    self.reset_nonce(start_nonce)
+
+                    return response
            else:
                print("Non-interactive mode enabled. Transactions will be executed without approval.")
@@ -317,7 +333,7 @@ def balance_of(self, token_address: ETHAddress | None = None) -> float:

    def nonce(self) -> int:
        return self.safe.retrieve_nonce()

    def gas_price(self) -> int:
        return self.web3.eth.gas_price if self.gas_multiplier is None else int(self.web3.eth.gas_price * self.gas_multiplier)
@@ -331,6 +347,12 @@ def track_nonce(self, safe_nonce: Optional[int] = None) -> int:
        else:
            return safe_nonce

+    def reset_nonce(self, starting_safe_nonce: Optional[int] = None) -> None:
+        if starting_safe_nonce is None:
+            self.safe_nonce = None
+        else:
+            self.safe_nonce = starting_safe_nonce - 1 # -1 because it will be incremented in track_nonce
+
    @staticmethod
    def is_valid_safe(client: EthereumClient, address: ETHAddress) -> bool:
        return is_valid_safe(client, address)
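A short, illustrative sketch (not from the PR) of how a caller can branch on the new bool | str contract of send_tx_batch; FakeManager stands in for SafeManager so the snippet runs on its own.

# Illustrative only: True = batch sent, False = user declined, str = user feedback.
class FakeManager:
    def send_tx_batch(self, txs: list[str], require_approval: bool) -> bool | str:
        # Pretend the user typed feedback instead of answering y/n.
        return "Use USDC instead of DAI"

def dispatch(result: bool | str) -> str:
    if result is True:
        return "sent"                            # transactions executed or submitted
    if result is False:
        return "declined"                        # nothing sent; nonce already reset inside send_tx_batch
    return f"retry with feedback: {result}"      # fold the feedback string into the next prompt

manager = FakeManager()
print(dispatch(manager.send_tx_batch(["Swap 1 ETH for DAI"], require_approval=True)))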
4 changes: 4 additions & 0 deletions autotx/utils/ethereum/uniswap/swap.py
@@ -201,6 +201,10 @@ def build_swap_transaction(
            "gasPrice": Wei(int(web3.eth.gas_price * GAS_PRICE_MULTIPLIER)),
        }
    )
+
+    token_in_symbol = "ETH" if token_in_is_native else token_in_symbol
+    token_out_symbol = "ETH" if token_out_is_native else token_out_symbol
+
    transactions.append(
        PreparedTx(
            f"Swap {Decimal(amount_in) / 10 ** token_in_decimals} {token_in_symbol} for {Decimal(amount_out) / 10 ** token_out_decimals} {token_out_symbol}",
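For clarity, a tiny standalone example (all values made up) of what the symbol substitution above does to the PreparedTx summary when one leg of the swap is the native token.

# Illustrative values only; mirrors the summary formatting in build_swap_transaction.
from decimal import Decimal

token_in_is_native = True
token_in_symbol = "WETH"                        # wrapped symbol before the substitution
token_in_symbol = "ETH" if token_in_is_native else token_in_symbol

amount_in, token_in_decimals = 10**18, 18       # 1 ETH in wei
amount_out, token_out_decimals, token_out_symbol = 3000 * 10**6, 6, "USDC"

summary = (
    f"Swap {Decimal(amount_in) / 10 ** token_in_decimals} {token_in_symbol} "
    f"for {Decimal(amount_out) / 10 ** token_out_decimals} {token_out_symbol}"
)
print(summary)  # Swap 1 ETH for 3000 USDC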
8 changes: 1 addition & 7 deletions pyproject.toml
@@ -37,30 +37,24 @@ requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.mypy]

strict = true

exclude = [
"autotx/tests/",
"benchmarks.py",
"autotx/utils/agent/build_goal.py"
]

ignore_missing_imports = true
install_types = true
non_interactive = true

plugins = [
"pydantic.mypy"
]

# from https://blog.wolt.com/engineering/2021/09/30/professional-grade-mypy-configuration/
disallow_untyped_defs = true
no_implicit_optional = true
check_untyped_defs = true
warn_return_any = true
show_error_codes = true
warn_unused_ignores = true

disallow_incomplete_defs = true
disallow_untyped_decorators = true
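As a rough illustration of what the tightened settings enforce (an assumed example, not code from the repo): with strict = true and disallow_untyped_defs, mypy rejects the first function and accepts the second.

# Hypothetical module checked with the config above.
def add(a, b):                           # flagged: function is missing type annotations
    return a + b

def add_typed(a: int, b: int) -> int:    # passes under strict mode
    return a + b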