Add error tracing tests & impl for v1
hmstepanek committed Dec 13, 2023
1 parent 410e47c commit ea026f3
Showing 4 changed files with 537 additions and 58 deletions.
77 changes: 59 additions & 18 deletions newrelic/hooks/mlmodel_openai.py
@@ -19,6 +19,7 @@
 from newrelic.api.function_trace import FunctionTrace
 from newrelic.api.time_trace import get_trace_linking_metadata
 from newrelic.api.transaction import current_transaction
+from newrelic.common.encoding_utils import json_decode
 from newrelic.common.object_wrapper import wrap_function_wrapper
 from newrelic.common.package_version_utils import get_package_version
 from newrelic.core.config import global_settings
@@ -192,16 +193,36 @@ def wrap_chat_completion_sync(wrapped, instance, args, kwargs):
     try:
         return_val = wrapped(*args, **kwargs)
     except Exception as exc:
-        exc_organization = getattr(exc, "organization", "")
+        if OPENAI_V1:
+            response = getattr(exc, "response", "")
+            response_headers = getattr(response, "headers", "")
+            exc_organization = response_headers.get("openai-organization", "") if response_headers else ""
+            # There appears to be a bug here in openai v1 where despite having code,
+            # param, etc in the error response, they are not populated on the exception
+            # object so grab them from the response object instead.
+            content = getattr(response, "content", b"{}")
+            response = json_decode(content.decode("utf-8")).get("error", {})
+            notice_error_attributes = {
+                "http.statusCode": getattr(exc, "status_code", "") or "",
+                "error.message": response.get("message", "") or "",
+                "error.code": response.get("code", "") or "",
+                "error.param": response.get("param", "") or "",
+                "completion_id": completion_id,
+            }
+        else:
+            exc_organization = getattr(exc, "organization", "")
+            notice_error_attributes = {
+                "http.statusCode": getattr(exc, "http_status", ""),
+                "error.message": getattr(exc, "_message", ""),
+                "error.code": getattr(getattr(exc, "error", ""), "code", ""),
+                "error.param": getattr(exc, "param", ""),
+                "completion_id": completion_id,
+            }
+        # Override the default message if it is not empty.
+        message = notice_error_attributes.pop("error.message")
+        if message:
+            exc._nr_message = message

-        notice_error_attributes = {
-            "http.statusCode": getattr(exc, "http_status", ""),
-            "error.message": getattr(exc, "_message", ""),
-            "error.code": getattr(getattr(exc, "error", ""), "code", ""),
-            "error.param": getattr(exc, "param", ""),
-            "completion_id": completion_id,
-        }
-        exc._nr_message = notice_error_attributes.pop("error.message")
         ft.notice_error(
             attributes=notice_error_attributes,
         )
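Aside (not part of the diff): the OPENAI_V1 branch above pulls the error details out of the httpx response attached to the exception rather than off the exception itself. A minimal, self-contained sketch of that field mapping, using the mocked 401 "Invalid API key." payload added to _mock_external_openai_server.py below and the standard-library json module in place of New Relic's json_decode helper; the 401 status and payload come from this commit, everything else here is illustrative.

import json

# Stand-in for exc.response.content on an openai v1 exception (illustrative only).
content = json.dumps(
    {
        "error": {
            "message": "Incorrect API key provided: DEADBEEF.",
            "type": "invalid_request_error",
            "param": None,
            "code": "invalid_api_key",
        }
    }
).encode("utf-8")

error = json.loads(content.decode("utf-8")).get("error", {})
attributes = {
    "http.statusCode": 401,                       # getattr(exc, "status_code", "") on the v1 exception
    "error.code": error.get("code", "") or "",    # -> "invalid_api_key"
    "error.param": error.get("param", "") or "",  # None is coerced to "" by the `or ""` fallback
}
message = error.get("message", "") or ""          # stored on exc._nr_message when non-empty
print(attributes, message)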
@@ -617,16 +638,36 @@ async def wrap_chat_completion_async(wrapped, instance, args, kwargs):
     try:
         return_val = await wrapped(*args, **kwargs)
     except Exception as exc:
-        exc_organization = getattr(exc, "organization", "")
+        if OPENAI_V1:
+            response = getattr(exc, "response", "")
+            response_headers = getattr(response, "headers", "")
+            exc_organization = response_headers.get("openai-organization", "") if response_headers else ""
+            # There appears to be a bug here in openai v1 where despite having code,
+            # param, etc in the error response, they are not populated on the exception
+            # object so grab them from the response object instead.
+            content = getattr(response, "content", b"{}")
+            response = json_decode(content.decode("utf-8")).get("error", {})
+            notice_error_attributes = {
+                "http.statusCode": getattr(exc, "status_code", "") or "",
+                "error.message": response.get("message", "") or "",
+                "error.code": response.get("code", "") or "",
+                "error.param": response.get("param", "") or "",
+                "completion_id": completion_id,
+            }
+        else:
+            exc_organization = getattr(exc, "organization", "")
+            notice_error_attributes = {
+                "http.statusCode": getattr(exc, "http_status", ""),
+                "error.message": getattr(exc, "_message", ""),
+                "error.code": getattr(getattr(exc, "error", ""), "code", ""),
+                "error.param": getattr(exc, "param", ""),
+                "completion_id": completion_id,
+            }
+        # Override the default message if it is not empty.
+        message = notice_error_attributes.pop("error.message")
+        if message:
+            exc._nr_message = message

-        notice_error_attributes = {
-            "http.statusCode": getattr(exc, "http_status", ""),
-            "error.message": getattr(exc, "_message", ""),
-            "error.code": getattr(getattr(exc, "error", ""), "code", ""),
-            "error.param": getattr(exc, "param", ""),
-            "completion_id": completion_id,
-        }
-        exc._nr_message = notice_error_attributes.pop("error.message")
         ft.notice_error(
             attributes=notice_error_attributes,
         )
24 changes: 24 additions & 0 deletions tests/mlmodel_openai/_mock_external_openai_server.py
@@ -107,6 +107,30 @@
             "system_fingerprint": None,
         },
     ],
+    "Invalid API key.": [
+        {"content-type": "application/json; charset=utf-8", "x-request-id": "a51821b9fd83d8e0e04542bedc174310"},
+        401,
+        {
+            "error": {
+                "message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.",
+                "type": "invalid_request_error",
+                "param": None,
+                "code": "invalid_api_key",
+            }
+        },
+    ],
+    "Model does not exist.": [
+        {"content-type": "application/json; charset=utf-8", "x-request-id": "3b0f8e510ee8a67c08a227a98eadbbe6"},
+        404,
+        {
+            "error": {
+                "message": "The model `does-not-exist` does not exist",
+                "type": "invalid_request_error",
+                "param": None,
+                "code": "model_not_found",
+            }
+        },
+    ],
     "This is an embedding test.": [
         {
             "content-type": "application/json",
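Aside (not part of the diff): the mock server keys these entries by the message content sent in the request, so an error test can select the 401 or 404 payload simply by what it sends. A rough sketch of how a v1 test might drive the new "Invalid API key." entry; the real tests live in test_chat_completion_error_v1.py, which is not rendered in this commit view, and the client fixture name below is an assumption.

import openai
import pytest

def test_invalid_api_key_raises_and_is_traced(sync_openai_client):  # hypothetical fixture name
    # The message content "Invalid API key." selects the mocked 401 response above.
    with pytest.raises(openai.AuthenticationError):
        sync_openai_client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Invalid API key."}],
            temperature=0.7,
            max_tokens=100,
        )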
80 changes: 40 additions & 40 deletions tests/mlmodel_openai/conftest.py
@@ -52,6 +52,7 @@
 if get_openai_version() < (1, 0):
     collect_ignore = [
         "test_chat_completion_v1.py",
+        "test_chat_completion_error_v1.py",
         "test_embeddings_v1.py",
         "test_get_llm_message_ids_v1.py",
         "test_chat_completion_error_v1.py",
@@ -144,9 +145,9 @@ def set_info():
 def openai_server(
     openai_version,  # noqa: F811
     openai_clients,
-    wrap_openai_base_client_process_response,
     wrap_openai_api_requestor_request,
     wrap_openai_api_requestor_interpret_response,
+    wrap_httpx_client_send,
 ):
     """
     This fixture will either create a mocked backend for testing purposes, or will
@@ -166,9 +167,7 @@ def openai_server(
             yield  # Run tests
     else:
         # Apply function wrappers to record data
-        wrap_function_wrapper(
-            "openai._base_client", "BaseClient._process_response", wrap_openai_base_client_process_response
-        )
+        wrap_function_wrapper("httpx._client", "Client.send", wrap_httpx_client_send)
         yield  # Run tests
         # Write responses to audit log
         with open(OPENAI_AUDIT_LOG_FILE, "w") as audit_log_fp:
@@ -178,6 +177,43 @@
         yield


+def bind_send_params(request, *, stream=False, **kwargs):
+    return request
+
+
+@pytest.fixture(scope="session")
+def wrap_httpx_client_send(extract_shortened_prompt):  # noqa: F811
+    def _wrap_httpx_client_send(wrapped, instance, args, kwargs):
+        request = bind_send_params(*args, **kwargs)
+        if not request:
+            return wrapped(*args, **kwargs)
+
+        params = json.loads(request.content.decode("utf-8"))
+        prompt = extract_shortened_prompt(params)
+
+        # Send request
+        response = wrapped(*args, **kwargs)
+
+        if response.status_code >= 400 or response.status_code < 200:
+            prompt = "error"
+
+        rheaders = getattr(response, "headers")
+
+        headers = dict(
+            filter(
+                lambda k: k[0].lower() in RECORDED_HEADERS
+                or k[0].lower().startswith("openai")
+                or k[0].lower().startswith("x-ratelimit"),
+                rheaders.items(),
+            )
+        )
+        body = json.loads(response.content.decode("utf-8"))
+        OPENAI_AUDIT_LOG_CONTENTS[prompt] = headers, response.status_code, body  # Append response data to log
+        return response
+
+    return _wrap_httpx_client_send
+
+
 @pytest.fixture(scope="session")
 def wrap_openai_api_requestor_interpret_response():
     def _wrap_openai_api_requestor_interpret_response(wrapped, instance, args, kwargs):
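Aside (not part of the diff): when recording against the real backend, each entry the new wrap_httpx_client_send fixture appends to OPENAI_AUDIT_LOG_CONTENTS is a (headers, status_code, body) tuple, the same shape as the hand-written entries in _mock_external_openai_server.py above. Entries are keyed by the shortened prompt extracted from the request body, except that responses with a status outside 200-399 are recorded under the key "error". Roughly, with made-up values:

OPENAI_AUDIT_LOG_CONTENTS["This is a chat completion test."] = (
    {"content-type": "application/json", "openai-organization": "example-org"},  # filtered response headers (made up)
    200,
    {"id": "chatcmpl-123", "object": "chat.completion", "choices": []},          # decoded JSON body (abridged, made up)
)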
@@ -236,39 +272,3 @@ def bind_request_params(method, url, params=None, *args, **kwargs):

 def bind_request_interpret_response_params(result, stream):
     return result.content.decode("utf-8"), result.status_code, result.headers
-
-
-def bind_base_client_process_response(
-    cast_to,
-    options,
-    response,
-    stream,
-    stream_cls,
-):
-    return options, response
-
-
-@pytest.fixture(scope="session")
-def wrap_openai_base_client_process_response(extract_shortened_prompt):  # noqa: F811
-    def _wrap_openai_base_client_process_response(wrapped, instance, args, kwargs):
-        options, response = bind_base_client_process_response(*args, **kwargs)
-        if not options:
-            return wrapped(*args, **kwargs)
-
-        data = getattr(options, "json_data", {})
-        prompt = extract_shortened_prompt(data)
-        rheaders = getattr(response, "headers")
-
-        headers = dict(
-            filter(
-                lambda k: k[0].lower() in RECORDED_HEADERS
-                or k[0].lower().startswith("openai")
-                or k[0].lower().startswith("x-ratelimit"),
-                rheaders.items(),
-            )
-        )
-        body = json.loads(response.content.decode("utf-8"))
-        OPENAI_AUDIT_LOG_CONTENTS[prompt] = headers, response.status_code, body  # Append response data to audit log
-        return wrapped(*args, **kwargs)
-
-    return _wrap_openai_base_client_process_response