Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Feat] OpenAI Assistants (Beta) Integration #609

Merged
merged 26 commits into from
Jan 4, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
26 commits
Select commit Hold shift + click to select a range
0a1e754
some working code
the-praxs Dec 28, 2024
c8e6e29
refactored and cleaned code
the-praxs Dec 29, 2024
08b60ff
add exception handling
the-praxs Dec 29, 2024
6961ebf
add assistants example notebook with images and pdf
the-praxs Dec 29, 2024
45319c5
Merge branch 'main' into feat/openai-assistants-beta
the-praxs Dec 29, 2024
be7e702
linting
the-praxs Dec 29, 2024
7c6a803
modify notebook name and link
the-praxs Dec 29, 2024
6fbb111
remove agentops github link from notebook
the-praxs Dec 29, 2024
36bc5a2
modify image links to github urls
the-praxs Dec 29, 2024
cf9ae0c
remove agentops content from conclusion section
the-praxs Dec 29, 2024
908b219
add assistants examples page
the-praxs Dec 29, 2024
b218b5a
add assistants example to openai integrations page
the-praxs Dec 29, 2024
4e4dde0
Merge branch 'main' into feat/openai-assistants-beta
the-praxs Dec 29, 2024
71791d1
modify variable name from `original` to `original_func` when patching…
the-praxs Dec 29, 2024
a6de520
Merge branch 'main' into feat/openai-assistants-beta
the-praxs Dec 29, 2024
9712636
remove casting response to str for `returns` attribute
the-praxs Dec 30, 2024
30ed383
add partial `LLMEvent` for calculating costs
the-praxs Dec 30, 2024
dd79008
add logger to error event
the-praxs Dec 30, 2024
72365b3
Merge branch 'main' into feat/openai-assistants-beta
the-praxs Dec 30, 2024
a77e3ff
check if `usage` is not `None`
the-praxs Dec 30, 2024
d42d75b
add test for assistants api
the-praxs Dec 30, 2024
fe1d727
add more tests
the-praxs Dec 30, 2024
9d7e29e
Merge branch 'main' into feat/openai-assistants-beta
the-praxs Dec 31, 2024
0eccdfa
fix typo (no idea how it occurred in the first place)
the-praxs Dec 31, 2024
c8cfcbb
Merge branch 'main' into feat/openai-assistants-beta
the-praxs Jan 2, 2025
5ec56c1
quick hack to store model name for run steps associated with a run
the-praxs Jan 3, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
109 changes: 108 additions & 1 deletion agentops/llms/providers/openai.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import inspect
import pprint
from typing import Optional

Expand All @@ -16,6 +15,8 @@
class OpenAiProvider(InstrumentedProvider):
original_create = None
original_create_async = None
original_assistant_methods = None
assistants_run_steps = {}

def __init__(self, client):
super().__init__(client)
Expand Down Expand Up @@ -138,6 +139,7 @@
def override(self):
    """Patch all supported OpenAI endpoints (chat completions, async chat
    completions, and the Assistants beta API) so calls are recorded by AgentOps."""
    self._override_openai_v1_completion()
    self._override_openai_v1_async_completion()
    self._override_openai_assistants_beta()

Check warning on line 142 in agentops/llms/providers/openai.py

View check run for this annotation

Codecov / codecov/patch

agentops/llms/providers/openai.py#L142

Added line #L142 was not covered by tests

def _override_openai_v1_completion(self):
from openai.resources.chat import completions
Expand Down Expand Up @@ -228,9 +230,114 @@
# Override the original method with the patched one
completions.AsyncCompletions.create = patched_function

def _override_openai_assistants_beta(self):
    """Override OpenAI Assistants (Beta) API methods so each call is recorded
    as an AgentOps ``ActionEvent`` (plus an ``LLMEvent`` when usage data is
    present). Originals are stashed in ``self.original_assistant_methods`` so
    ``undo_override`` can restore them."""
    from openai._legacy_response import LegacyAPIResponse
    from openai.resources import beta
    from openai.pagination import BasePage

    def handle_response(response, kwargs, init_timestamp, session: Optional[Session] = None):
        """Record events for `response` and return the ORIGINAL response object.

        The response is returned unchanged (even when recording fails) so the
        patched method remains transparent to callers.
        """
        action_event = ActionEvent(init_timestamp=init_timestamp, params=kwargs)
        if session is not None:
            action_event.session_id = session.session_id

        try:
            # Set action type and returns. Paginated results are named after the
            # page's item type (e.g. "SyncCursorPage[Run]" -> "Run").
            action_event.action_type = (
                response.__class__.__name__.split("[")[1][:-1]
                if isinstance(response, BasePage)
                else response.__class__.__name__
            )
            action_event.returns = response.model_dump() if hasattr(response, "model_dump") else response
            action_event.end_timestamp = get_ISO_time()
            self._safe_record(session, action_event)

            # Create LLMEvent if usage data exists
            response_dict = response.model_dump() if hasattr(response, "model_dump") else {}

            # Remember the model used by each run so run-step responses (which
            # carry usage but no model field) can be attributed to it later.
            if "id" in response_dict and response_dict.get("id").startswith("run"):
                if response_dict["id"] not in self.assistants_run_steps:
                    self.assistants_run_steps[response_dict.get("id")] = {"model": response_dict.get("model")}

            if "usage" in response_dict and response_dict["usage"] is not None:
                llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
                if session is not None:
                    llm_event.session_id = session.session_id

                llm_event.model = response_dict.get("model")
                llm_event.prompt_tokens = response_dict["usage"]["prompt_tokens"]
                llm_event.completion_tokens = response_dict["usage"]["completion_tokens"]
                llm_event.end_timestamp = get_ISO_time()
                self._safe_record(session, llm_event)

            elif "data" in response_dict:
                # Paginated list (e.g. run steps): each item may carry usage data.
                for item in response_dict["data"]:
                    if "usage" in item and item["usage"] is not None:
                        llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
                        if session is not None:
                            llm_event.session_id = session.session_id

                        llm_event.model = self.assistants_run_steps[item["run_id"]]["model"]
                        llm_event.prompt_tokens = item["usage"]["prompt_tokens"]
                        llm_event.completion_tokens = item["usage"]["completion_tokens"]
                        llm_event.end_timestamp = get_ISO_time()
                        self._safe_record(session, llm_event)

        except Exception as e:
            self._safe_record(session, ErrorEvent(trigger_event=action_event, exception=e))

            # BUGFIX: previously `response` itself was rebound to its pformat
            # string here, so on any recording error the patched method returned
            # a str instead of the API response. Format into a separate variable
            # and leave `response` untouched.
            kwargs_str = pprint.pformat(kwargs)
            response_str = pprint.pformat(response)
            logger.warning(
                f"Unable to parse response for Assistants API. Skipping upload to AgentOps\n"
                f"response:\n {response_str}\n"
                f"kwargs:\n {kwargs_str}\n"
            )

        return response

    def create_patched_function(original_func):
        def patched_function(*args, **kwargs):
            init_timestamp = get_ISO_time()

            # `session` is an AgentOps-only kwarg; strip it before calling OpenAI.
            session = kwargs.get("session", None)
            if "session" in kwargs.keys():
                del kwargs["session"]

            response = original_func(*args, **kwargs)
            # Raw/legacy responses are passed through untouched.
            if isinstance(response, LegacyAPIResponse):
                return response

            return handle_response(response, kwargs, init_timestamp, session=session)

        return patched_function

    # Store and patch Assistant API methods
    assistant_api_methods = {
        beta.Assistants: ["create", "retrieve", "update", "delete", "list"],
        beta.Threads: ["create", "retrieve", "update", "delete"],
        beta.threads.Messages: ["create", "retrieve", "update", "list"],
        beta.threads.Runs: ["create", "retrieve", "update", "list", "submit_tool_outputs", "cancel"],
        beta.threads.runs.steps.Steps: ["retrieve", "list"],
    }

    self.original_assistant_methods = {
        (cls, method): getattr(cls, method) for cls, methods in assistant_api_methods.items() for method in methods
    }

    # Override methods and verify
    for (cls, method), original_func in self.original_assistant_methods.items():
        patched_function = create_patched_function(original_func)
        setattr(cls, method, patched_function)
def undo_override(self):
if self.original_create is not None and self.original_create_async is not None:
from openai.resources.chat import completions

completions.AsyncCompletions.create = self.original_create_async
completions.Completions.create = self.original_create

if self.original_assistant_methods is not None:
for (cls, method), original in self.original_assistant_methods.items():
setattr(cls, method, original)

Check warning on line 343 in agentops/llms/providers/openai.py

View check run for this annotation

Codecov / codecov/patch

agentops/llms/providers/openai.py#L343

Added line #L343 was not covered by tests
36 changes: 35 additions & 1 deletion agentops/llms/tracker.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,41 @@ class LlmTracker:
SUPPORTED_APIS = {
"litellm": {"1.3.1": ("openai_chat_completions.completion",)},
"openai": {
"1.0.0": ("chat.completions.create",),
"1.0.0": (
"chat.completions.create",
# Assistants
"beta.assistants.create",
"beta.assistants.retrieve",
"beta.assistants.update",
"beta.assistants.delete",
"beta.assistants.list",
"beta.assistants.files.create",
"beta.assistants.files.retrieve",
"beta.assistants.files.delete",
"beta.assistants.files.list",
# Threads
"beta.threads.create",
"beta.threads.retrieve",
"beta.threads.update",
"beta.threads.delete",
# Messages
"beta.threads.messages.create",
"beta.threads.messages.retrieve",
"beta.threads.messages.update",
"beta.threads.messages.list",
"beta.threads.messages.files.retrieve",
"beta.threads.messages.files.list",
# Runs
"beta.threads.runs.create",
"beta.threads.runs.retrieve",
"beta.threads.runs.update",
"beta.threads.runs.list",
"beta.threads.runs.cancel",
"beta.threads.runs.submit_tool_outputs",
# Run Steps
"beta.threads.runs.steps.Steps.retrieve",
"beta.threads.runs.steps.Steps.list",
),
"0.0.0": (
"ChatCompletion.create",
"ChatCompletion.acreate",
Expand Down
4 changes: 4 additions & 0 deletions docs/v1/examples/examples.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,10 @@ mode: "wide"
<Card title="Multi Session" icon="computer" href="/v1/examples/multi_session">
Manage multiple sessions at the same time
</Card>

<Card title="OpenAI Assistants" icon={<img src="https://www.github.com/agentops-ai/agentops/blob/main/docs/images/external/openai/openai-logomark.png?raw=true" alt="OpenAI Assistants" />} iconType="image" href="/v1/examples/openai_assistants">
Observe OpenAI Assistants
</Card>
</CardGroup>

### Integration Examples
Expand Down
Loading
Loading