Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add AgentOps monitoring functionality and example #1

Open
wants to merge 5 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -144,6 +144,27 @@ response = autogen.Completion.create(context=test_instance, **config)

Please find more [code examples](https://microsoft.github.io/autogen/docs/Examples#tune-gpt-models) for this feature. -->

## Monitoring
Basic monitoring is demonstrated in `test/twoagent-monitored.py`.

Monitoring can be implemented using the AgentOps library.

1. `pip install agentops`
2. Create an account at https://agentops.ai and generate an API key
3. `export AGENTOPS_API_KEY=<your_key>`
4. When creating a `ConversableAgent` or `AssistantAgent`, first create an AgentOps client instance
```python
from agentops import Client
ao_client = Client(api_key=os.environ.get('AGENTOPS_API_KEY'),
tags=['describe your session here'])
```
5. Pass this client instance into the constructor arguments of your Agent
```python
assistant = AssistantAgent("assistant", llm_config={"config_list": config_list}, ao_client=ao_client)
```
6. Run your agent(s) and view the results of your session on the [AgentOps Dashboard](https://app.agentops.ai/dashboard)


## Documentation

You can find detailed documentation about AutoGen [here](https://microsoft.github.io/autogen/).
Expand Down
3 changes: 3 additions & 0 deletions autogen/agentchat/assistant_agent.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from typing import Callable, Dict, Literal, Optional, Union
from agentops import Client as AOClient

from .conversable_agent import ConversableAgent

Expand Down Expand Up @@ -39,6 +40,7 @@ def __init__(
human_input_mode: Optional[str] = "NEVER",
code_execution_config: Optional[Union[Dict, Literal[False]]] = False,
description: Optional[str] = None,
ao_client: AOClient = None,
**kwargs,
):
"""
Expand Down Expand Up @@ -67,6 +69,7 @@ def __init__(
code_execution_config=code_execution_config,
llm_config=llm_config,
description=description,
ao_client=ao_client,
**kwargs,
)

Expand Down
8 changes: 8 additions & 0 deletions autogen/agentchat/conversable_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
from autogen.code_utils import DEFAULT_MODEL, UNKNOWN, content_str, execute_code, extract_code, infer_lang

from .agent import Agent
from agentops import Client as AOClient

try:
from termcolor import colored
Expand Down Expand Up @@ -56,6 +57,7 @@ def __init__(
llm_config: Optional[Union[Dict, Literal[False]]] = None,
default_auto_reply: Optional[Union[str, Dict, None]] = "",
description: Optional[str] = None,
ao_client: AOClient = None
):
"""
Args:
Expand Down Expand Up @@ -99,6 +101,7 @@ def __init__(
default_auto_reply (str or dict or None): default auto reply when no code execution or llm-based reply is generated.
description (str): a short description of the agent. This description is used by other agents
(e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
ao_client (AgentOps Client Instance): an instance of an AgentOps client. All agents should share one instance. (Default: None)
"""
super().__init__(name)
# a dictionary of conversations, default value is list
Expand Down Expand Up @@ -140,6 +143,7 @@ def __init__(
self.register_reply([Agent, None], ConversableAgent.a_generate_function_call_reply)
self.register_reply([Agent, None], ConversableAgent.check_termination_and_human_reply)
self.register_reply([Agent, None], ConversableAgent.a_check_termination_and_human_reply)
self.ao_client = ao_client

def register_reply(
self,
Expand Down Expand Up @@ -349,6 +353,8 @@ def send(
"""
# When the agent composes and sends the message, the role of the message is "assistant"
# unless it's "function".
if self.ao_client:
self.ao_client.record_action("send_message_to_another_agent", tags=['conversable_agent', str(self.name())])
valid = self._append_oai_message(message, "assistant", recipient)
if valid:
recipient.receive(message, self, request_reply, silent)
Expand Down Expand Up @@ -481,6 +487,8 @@ def receive(
Raises:
ValueError: if the message can't be converted into a valid ChatCompletion message.
"""
if self.ao_client:
self.ao_client.record_action("received_message_from_another_agent", tags=['conversable_agent', str(self.name())])
self._process_received_message(message, sender, silent)
if request_reply is False or request_reply is None and self.reply_at_receive[sender] is False:
return
Expand Down
1 change: 1 addition & 0 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
"flaml",
"python-dotenv",
"tiktoken",
"agentops"
]

setuptools.setup(
Expand Down
16 changes: 16 additions & 0 deletions test/twoagent-monitored.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
"""Two-agent AutoGen example with AgentOps session monitoring.

Requires the AGENTOPS_API_KEY environment variable to be exported
(see README "Monitoring" section) and an OAI_CONFIG_LIST env variable
or file for the LLM endpoints.
"""
import os

from agentops import Client

from autogen import AssistantAgent, UserProxyAgent, config_list_from_json

# One shared AgentOps client instance; all agents should reuse it so their
# actions are recorded into a single session on the AgentOps dashboard.
ao_client = Client(api_key=os.environ.get('AGENTOPS_API_KEY'),
                   tags=['autogen', 'Autogen Example'])

# Load LLM inference endpoints from an env variable or a file
# See https://microsoft.github.io/autogen/docs/FAQ#set-your-api-endpoints
# and OAI_CONFIG_LIST_sample
config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST")

# pass in the AgentOps client instance for agent monitoring and visibility
assistant = AssistantAgent("assistant", llm_config={"config_list": config_list}, ao_client=ao_client)
user_proxy = UserProxyAgent("user_proxy", code_execution_config={"work_dir": "coding"})
user_proxy.initiate_chat(assistant, message="Plot a chart of NVDA and TESLA stock price change YTD.")