diff --git a/newrelic/hooks/mlmodel_openai.py b/newrelic/hooks/mlmodel_openai.py
index a653b7ca69..7ea277e766 100644
--- a/newrelic/hooks/mlmodel_openai.py
+++ b/newrelic/hooks/mlmodel_openai.py
@@ -59,19 +59,37 @@ def wrap_embedding_sync(wrapped, instance, args, kwargs):
     try:
         response = wrapped(*args, **kwargs)
     except Exception as exc:
-        notice_error_attributes = {
-            "http.statusCode": getattr(exc, "http_status", ""),
-            "error.message": getattr(exc, "_message", ""),
-            "error.code": getattr(getattr(exc, "error", ""), "code", ""),
-            "error.param": getattr(exc, "param", ""),
-            "embedding_id": embedding_id,
-        }
-        exc._nr_message = notice_error_attributes.pop("error.message")
+        if OPENAI_V1:
+            response = getattr(exc, "response", "")
+            response_headers = getattr(response, "headers", "")
+            exc_organization = response_headers.get("openai-organization", "") if response_headers else ""
+            # There appears to be a bug here in openai v1 where despite having code,
+            # param, etc in the error response, they are not populated on the exception
+            # object so grab them from the response body object instead.
+            body = getattr(exc, "body", {}) or {}
+            notice_error_attributes = {
+                "http.statusCode": getattr(exc, "status_code", "") or "",
+                "error.message": body.get("message", "") or "",
+                "error.code": body.get("code", "") or "",
+                "error.param": body.get("param", "") or "",
+                "embedding_id": embedding_id,
+            }
+        else:
+            exc_organization = getattr(exc, "organization", "")
+            notice_error_attributes = {
+                "http.statusCode": getattr(exc, "http_status", ""),
+                "error.message": getattr(exc, "_message", ""),
+                "error.code": getattr(getattr(exc, "error", ""), "code", ""),
+                "error.param": getattr(exc, "param", ""),
+                "embedding_id": embedding_id,
+            }
+        message = notice_error_attributes.pop("error.message")
+        if message:
+            exc._nr_message = message
         ft.notice_error(
             attributes=notice_error_attributes,
         )
-        # Gather attributes to add to embedding summary event in error context
-        exc_organization = getattr(exc, "organization", "")
+
         error_embedding_dict = {
             "id": embedding_id,
             "appName": settings.app_name,
@@ -498,19 +516,37 @@ async def wrap_embedding_async(wrapped, instance, args, kwargs):
     try:
         response = await wrapped(*args, **kwargs)
     except Exception as exc:
-        notice_error_attributes = {
-            "http.statusCode": getattr(exc, "http_status", ""),
-            "error.message": getattr(exc, "_message", ""),
-            "error.code": getattr(getattr(exc, "error", ""), "code", ""),
-            "error.param": getattr(exc, "param", ""),
-            "embedding_id": embedding_id,
-        }
-        exc._nr_message = notice_error_attributes.pop("error.message")
+        if OPENAI_V1:
+            response = getattr(exc, "response", "")
+            response_headers = getattr(response, "headers", "")
+            exc_organization = response_headers.get("openai-organization", "") if response_headers else ""
+            # There appears to be a bug here in openai v1 where despite having code,
+            # param, etc in the error response, they are not populated on the exception
+            # object so grab them from the response body object instead.
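+            # (Note: `body` can be None on some exception types, hence the `or {}`
+            # guard below, with missing values coerced to empty strings.)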
+            body = getattr(exc, "body", {}) or {}
+            notice_error_attributes = {
+                "http.statusCode": getattr(exc, "status_code", "") or "",
+                "error.message": body.get("message", "") or "",
+                "error.code": body.get("code", "") or "",
+                "error.param": body.get("param", "") or "",
+                "embedding_id": embedding_id,
+            }
+        else:
+            exc_organization = getattr(exc, "organization", "")
+            notice_error_attributes = {
+                "http.statusCode": getattr(exc, "http_status", ""),
+                "error.message": getattr(exc, "_message", ""),
+                "error.code": getattr(getattr(exc, "error", ""), "code", ""),
+                "error.param": getattr(exc, "param", ""),
+                "embedding_id": embedding_id,
+            }
+        message = notice_error_attributes.pop("error.message")
+        if message:
+            exc._nr_message = message
         ft.notice_error(
             attributes=notice_error_attributes,
         )
-        # Gather attributes to add to embedding summary event in error context
-        exc_organization = getattr(exc, "organization", "")
+
         error_embedding_dict = {
             "id": embedding_id,
             "appName": settings.app_name,
diff --git a/tests/mlmodel_openai/conftest.py b/tests/mlmodel_openai/conftest.py
index 6c0fed0e44..180bec9cc4 100644
--- a/tests/mlmodel_openai/conftest.py
+++ b/tests/mlmodel_openai/conftest.py
@@ -65,7 +65,6 @@
     "test_chat_completion.py",
     "test_get_llm_message_ids.py",
     "test_chat_completion_error.py",
-    "test_embeddings_error_v1.py",
 ]
 
 
diff --git a/tests/mlmodel_openai/test_embeddings_error_v1.py b/tests/mlmodel_openai/test_embeddings_error_v1.py
index 485723f041..d5cfaf3457 100644
--- a/tests/mlmodel_openai/test_embeddings_error_v1.py
+++ b/tests/mlmodel_openai/test_embeddings_error_v1.py
@@ -12,17 +12,311 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import sys
+
 import openai
 import pytest
-from newrelic.api.background_task import background_task
+from testing_support.fixtures import (
+    dt_enabled,
+    reset_core_stats_engine,
+    validate_custom_event_count,
+)
+from testing_support.validators.validate_custom_events import validate_custom_events
+from testing_support.validators.validate_error_trace_attributes import (
+    validate_error_trace_attributes,
+)
+from testing_support.validators.validate_span_events import validate_span_events
+from testing_support.validators.validate_transaction_metrics import (
+    validate_transaction_metrics,
+)
 
+from newrelic.api.background_task import background_task
+from newrelic.common.object_names import callable_name
 
 # Sync tests:
+no_model_events = [
+    (
+        {"type": "LlmEmbedding"},
+        {
+            "id": None,  # UUID that varies with each run
+            "appName": "Python Agent Test (mlmodel_openai)",
+            "transaction_id": "transaction-id",
+            "span_id": None,
+            "trace_id": "trace-id",
+            "input": "This is an embedding test with no model.",
+            "api_key_last_four_digits": "sk-CRET",
+            "duration": None,  # Response time varies each test run
+            "request.model": "",  # No model in this test case
+            "response.organization": "",
+            "vendor": "openAI",
+            "ingest_source": "Python",
+            "error": True,
+        },
+    ),
+]
+
+
+@dt_enabled
+@reset_core_stats_engine()
+@validate_error_trace_attributes(
+    callable_name(TypeError),
+    exact_attrs={
+        "agent": {},
+        "intrinsic": {},
+        "user": {},
+    },
+)
+@validate_span_events(
+    exact_agents={
+        "error.message": "create() missing 1 required keyword-only argument: 'model'"
+        if sys.version_info < (3, 10)
+        else "Embeddings.create() missing 1 required keyword-only argument: 'model'",
+    }
+)
+@validate_transaction_metrics(
+    name="test_embeddings_error_v1:test_embeddings_invalid_request_error_no_model",
+    scoped_metrics=[("Llm/embedding/OpenAI/create", 1)],
+    rollup_metrics=[("Llm/embedding/OpenAI/create", 1)],
+    custom_metrics=[
+        ("Python/ML/OpenAI/%s" % openai.__version__, 1),
+    ],
+    background_task=True,
+)
+@validate_custom_events(no_model_events)
+@validate_custom_event_count(count=1)
+@background_task()
+def test_embeddings_invalid_request_error_no_model(set_trace_info, sync_openai_client):
+    with pytest.raises(TypeError):
+        set_trace_info()
+        sync_openai_client.embeddings.create(input="This is an embedding test with no model.")  # no model provided
+
+
+@dt_enabled
+@reset_core_stats_engine()
+@validate_error_trace_attributes(
+    callable_name(TypeError),
+    exact_attrs={
+        "agent": {},
+        "intrinsic": {},
+        "user": {},
+    },
+)
+@validate_span_events(
+    exact_agents={
+        "error.message": "create() missing 1 required keyword-only argument: 'model'"
+        if sys.version_info < (3, 10)
+        else "AsyncEmbeddings.create() missing 1 required keyword-only argument: 'model'",
+    }
+)
+@validate_transaction_metrics(
+    name="test_embeddings_error_v1:test_embeddings_invalid_request_error_no_model_async",
+    scoped_metrics=[("Llm/embedding/OpenAI/create", 1)],
+    rollup_metrics=[("Llm/embedding/OpenAI/create", 1)],
+    custom_metrics=[
+        ("Python/ML/OpenAI/%s" % openai.__version__, 1),
+    ],
+    background_task=True,
+)
+@validate_custom_events(no_model_events)
+@validate_custom_event_count(count=1)
+@background_task()
+def test_embeddings_invalid_request_error_no_model_async(set_trace_info, async_openai_client, loop):
+    with pytest.raises(TypeError):
+        set_trace_info()
+        loop.run_until_complete(
+            async_openai_client.embeddings.create(input="This is an embedding test with no model.")
+        )  # no model provided
+
+
+invalid_model_events = [
+    (
+        {"type": "LlmEmbedding"},
+        {
+            "id": None,  # UUID that varies with each run
+            "appName": "Python Agent Test (mlmodel_openai)",
+            "transaction_id": "transaction-id",
+            "span_id": None,
+            "trace_id": "trace-id",
+            "input": "Model does not exist.",
+            "api_key_last_four_digits": "sk-CRET",
+            "duration": None,  # Response time varies each test run
+            "request.model": "does-not-exist",
+            "response.organization": None,
+            "vendor": "openAI",
+            "ingest_source": "Python",
+            "error": True,
+        },
+    ),
+]
+
+
+@dt_enabled
+@reset_core_stats_engine()
+@validate_error_trace_attributes(
+    callable_name(openai.NotFoundError),
+    exact_attrs={
+        "agent": {},
+        "intrinsic": {},
+        "user": {
+            "http.statusCode": 404,
+            "error.code": "model_not_found",
+        },
+    },
+)
+@validate_span_events(
+    exact_agents={
+        "error.message": "The model `does-not-exist` does not exist",
+    }
+)
+@validate_transaction_metrics(
+    name="test_embeddings_error_v1:test_embeddings_invalid_request_error_invalid_model",
+    scoped_metrics=[("Llm/embedding/OpenAI/create", 1)],
+    rollup_metrics=[("Llm/embedding/OpenAI/create", 1)],
+    custom_metrics=[
+        ("Python/ML/OpenAI/%s" % openai.__version__, 1),
+    ],
+    background_task=True,
+)
+@validate_custom_events(invalid_model_events)
+@validate_custom_event_count(count=1)
 @background_task()
 def test_embeddings_invalid_request_error_invalid_model(set_trace_info, sync_openai_client):
-    with pytest.raises(openai.InternalServerError):
+    with pytest.raises(openai.NotFoundError):
         set_trace_info()
         sync_openai_client.embeddings.create(input="Model does not exist.", model="does-not-exist")
+
+
+@dt_enabled
+@reset_core_stats_engine()
+@validate_error_trace_attributes(
+    callable_name(openai.NotFoundError),
+    exact_attrs={
+        "agent": {},
+        "intrinsic": {},
+        "user": {
+            "http.statusCode": 404,
"error.code": "model_not_found", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_invalid_model_async", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(invalid_model_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_invalid_model_async(set_trace_info, async_openai_client, loop): + with pytest.raises(openai.NotFoundError): + set_trace_info() + loop.run_until_complete( + async_openai_client.embeddings.create(input="Model does not exist.", model="does-not-exist") + ) + + +embedding_invalid_key_error_events = [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "appName": "Python Agent Test (mlmodel_openai)", + "transaction_id": "transaction-id", + "span_id": None, + "trace_id": "trace-id", + "input": "Invalid API key.", + "api_key_last_four_digits": "sk-BEEF", + "duration": None, # Response time varies each test run + "request.model": "text-embedding-ada-002", + "response.organization": None, + "vendor": "openAI", + "ingest_source": "Python", + "error": True, + }, + ), +] + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + "error.code": "invalid_api_key", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_wrong_api_key_error", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(embedding_invalid_key_error_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_wrong_api_key_error(set_trace_info, monkeypatch, sync_openai_client): + with pytest.raises(openai.AuthenticationError): + set_trace_info() + monkeypatch.setattr(sync_openai_client, "api_key", "DEADBEEF") + sync_openai_client.embeddings.create(input="Invalid API key.", model="text-embedding-ada-002") + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + "error.code": "invalid_api_key", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. 
+    }
+)
+@validate_transaction_metrics(
+    name="test_embeddings_error_v1:test_embeddings_wrong_api_key_error_async",
+    scoped_metrics=[("Llm/embedding/OpenAI/create", 1)],
+    rollup_metrics=[("Llm/embedding/OpenAI/create", 1)],
+    custom_metrics=[
+        ("Python/ML/OpenAI/%s" % openai.__version__, 1),
+    ],
+    background_task=True,
+)
+@validate_custom_events(embedding_invalid_key_error_events)
+@validate_custom_event_count(count=1)
+@background_task()
+def test_embeddings_wrong_api_key_error_async(set_trace_info, monkeypatch, async_openai_client, loop):
+    with pytest.raises(openai.AuthenticationError):
+        set_trace_info()
+        monkeypatch.setattr(async_openai_client, "api_key", "DEADBEEF")
+        loop.run_until_complete(
+            async_openai_client.embeddings.create(input="Invalid API key.", model="text-embedding-ada-002")
+        )
diff --git a/tox.ini b/tox.ini
index 3ba0daf7a2..a0827dea61 100644
--- a/tox.ini
+++ b/tox.ini
@@ -495,4 +495,4 @@ source =
 directory = ${TOX_ENV_DIR-.}/htmlcov
 
 [coverage:xml]
-output = ${TOX_ENV_DIR-.}/coverage.xml
+output = ${TOX_ENV_DIR-.}/coverage.xml
\ No newline at end of file
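
Reviewer note, not part of the patch: the OPENAI_V1 branch above works around v1 exceptions whose code/param attributes are unset even though the response body carries them. Below is a minimal, self-contained sketch of that extraction logic; FakeResponse and FakeV1Error are hypothetical stand-ins approximating the surface of an openai v1 APIStatusError, not real library objects.

class FakeResponse:
    # Stands in for the httpx response attached to a v1 exception.
    headers = {"openai-organization": "fake-org"}


class FakeV1Error(Exception):
    # Attributes the hook reads; code/param live in `body`, not on the exception.
    status_code = 404
    response = FakeResponse()
    body = {"message": "The model `does-not-exist` does not exist", "code": "model_not_found", "param": None}


def extract_error_attributes(exc, embedding_id="embedding-id"):
    # Mirrors the OPENAI_V1 branch added to wrap_embedding_sync/async above.
    response = getattr(exc, "response", "")
    response_headers = getattr(response, "headers", "")
    exc_organization = response_headers.get("openai-organization", "") if response_headers else ""
    body = getattr(exc, "body", {}) or {}
    return exc_organization, {
        "http.statusCode": getattr(exc, "status_code", "") or "",
        "error.message": body.get("message", "") or "",
        "error.code": body.get("code", "") or "",
        "error.param": body.get("param", "") or "",
        "embedding_id": embedding_id,
    }


org, attrs = extract_error_attributes(FakeV1Error())
assert org == "fake-org"
assert attrs["http.statusCode"] == 404
assert attrs["error.code"] == "model_not_found"
assert attrs["error.param"] == ""  # None in the body is coerced to ""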