diff --git a/newrelic/hooks/mlmodel_openai.py b/newrelic/hooks/mlmodel_openai.py index 9473e07b8..67642888a 100644 --- a/newrelic/hooks/mlmodel_openai.py +++ b/newrelic/hooks/mlmodel_openai.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json import logging import sys import traceback @@ -251,11 +252,17 @@ def _record_embedding_success(transaction, embedding_id, linking_metadata, kwarg response_headers = getattr(response, "_nr_response_headers", {}) input = kwargs.get("input") + attribute_response = response # In v1, response objects are pydantic models so this function call converts the # object back to a dictionary for backwards compatibility. - attribute_response = response if OPENAI_V1: - attribute_response = response.model_dump() + if hasattr(response, "model_dump"): + attribute_response = response.model_dump() + elif hasattr(response, "http_response") and hasattr(response.http_response, "text"): + # This is for the .with_raw_response. wrapper. This is expected + # to change, but the return type for now is the following: + # openai._legacy_response.LegacyAPIResponse + attribute_response = json.loads(response.http_response.text.strip()) request_id = response_headers.get("x-request-id") response_model = attribute_response.get("model") @@ -441,11 +448,18 @@ def _handle_completion_success(transaction, linking_metadata, completion_id, kwa # If response is not a stream generator, record the event data. # At this point, we have a response so we can grab attributes only available on the response object response_headers = getattr(return_val, "_nr_response_headers", {}) + response = return_val + # In v1, response objects are pydantic models so this function call converts the # object back to a dictionary for backwards compatibility. - response = return_val if OPENAI_V1: - response = response.model_dump() + if hasattr(response, "model_dump"): + response = response.model_dump() + elif hasattr(response, "http_response") and hasattr(response.http_response, "text"): + # This is for the .with_raw_response. wrapper. 
This is expected + # to change, but the return type for now is the following: + # openai._legacy_response.LegacyAPIResponse + response = json.loads(response.http_response.text.strip()) _record_completion_success(transaction, linking_metadata, completion_id, kwargs, ft, response_headers, response) except Exception: diff --git a/tests/mlmodel_openai/_mock_external_openai_server.py b/tests/mlmodel_openai/_mock_external_openai_server.py index c8b844cf3..9bb88e40d 100644 --- a/tests/mlmodel_openai/_mock_external_openai_server.py +++ b/tests/mlmodel_openai/_mock_external_openai_server.py @@ -223,284 +223,111 @@ } RESPONSES_V1 = { - "You are a scientist.": [ - { - "Content-Type": "text/event-stream", - "openai-model": "gpt-3.5-turbo-0613", - "openai-organization": "foobar-jtbczk", - "openai-processing-ms": "516", - "openai-version": "2020-10-01", - "x-ratelimit-limit-requests": "200", - "x-ratelimit-limit-tokens": "40000", - "x-ratelimit-remaining-requests": "196", - "x-ratelimit-remaining-tokens": "39880", - "x-ratelimit-reset-requests": "23m5.129s", - "x-ratelimit-reset-tokens": "180ms", - "x-request-id": "5c53c9b80af57a1c9b38568f01dcde7f", - }, - 200, - [ - { - "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", - "object": "chat.completion.chunk", - "created": 1706565311, - "model": "gpt-3.5-turbo-0613", - "system_fingerprint": None, - "choices": [ - {"index": 0, "delta": {"role": "assistant", "content": ""}, "logprobs": None, "finish_reason": None} - ], - }, - { - "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", - "object": "chat.completion.chunk", - "created": 1706565311, - "model": "gpt-3.5-turbo-0613", - "system_fingerprint": None, - "choices": [{"index": 0, "delta": {"content": "212"}, "logprobs": None, "finish_reason": None}], - }, - { - "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", - "object": "chat.completion.chunk", - "created": 1706565311, - "model": "gpt-3.5-turbo-0613", - "system_fingerprint": None, - "choices": [{"index": 0, "delta": {"content": " degrees"}, "logprobs": None, "finish_reason": None}], - }, - { - "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", - "object": "chat.completion.chunk", - "created": 1706565311, - "model": "gpt-3.5-turbo-0613", - "system_fingerprint": None, - "choices": [{"index": 0, "delta": {"content": " Fahrenheit"}, "logprobs": None, "finish_reason": None}], - }, - { - "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", - "object": "chat.completion.chunk", - "created": 1706565311, - "model": "gpt-3.5-turbo-0613", - "system_fingerprint": None, - "choices": [{"index": 0, "delta": {"content": " is"}, "logprobs": None, "finish_reason": None}], - }, - { - "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", - "object": "chat.completion.chunk", - "created": 1706565311, - "model": "gpt-3.5-turbo-0613", - "system_fingerprint": None, - "choices": [{"index": 0, "delta": {"content": " equal"}, "logprobs": None, "finish_reason": None}], - }, - { - "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", - "object": "chat.completion.chunk", - "created": 1706565311, - "model": "gpt-3.5-turbo-0613", - "system_fingerprint": None, - "choices": [{"index": 0, "delta": {"content": " to"}, "logprobs": None, "finish_reason": None}], - }, - { - "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", - "object": "chat.completion.chunk", - "created": 1706565311, - "model": "gpt-3.5-turbo-0613", - "system_fingerprint": None, - "choices": [{"index": 0, "delta": {"content": " "}, "logprobs": None, "finish_reason": None}], - }, - { - "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", - "object": 
"chat.completion.chunk", - "created": 1706565311, - "model": "gpt-3.5-turbo-0613", - "system_fingerprint": None, - "choices": [{"index": 0, "delta": {"content": "100"}, "logprobs": None, "finish_reason": None}], - }, - { - "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", - "object": "chat.completion.chunk", - "created": 1706565311, - "model": "gpt-3.5-turbo-0613", - "system_fingerprint": None, - "choices": [{"index": 0, "delta": {"content": " degrees"}, "logprobs": None, "finish_reason": None}], - }, - { - "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", - "object": "chat.completion.chunk", - "created": 1706565311, - "model": "gpt-3.5-turbo-0613", - "system_fingerprint": None, - "choices": [{"index": 0, "delta": {"content": " Celsius"}, "logprobs": None, "finish_reason": None}], - }, - { - "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", - "object": "chat.completion.chunk", - "created": 1706565311, - "model": "gpt-3.5-turbo-0613", - "system_fingerprint": None, - "choices": [{"index": 0, "delta": {"content": "."}, "logprobs": None, "finish_reason": None}], - }, - { - "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv", - "object": "chat.completion.chunk", - "created": 1706565311, - "model": "gpt-3.5-turbo-0613", - "system_fingerprint": None, - "choices": [{"index": 0, "delta": {}, "logprobs": None, "finish_reason": "stop"}], - }, - ], - ] -} -RESPONSES_V1 = { - "You are a scientist.": [ + "Model does not exist.": [ + {"content-type": "application/json; charset=utf-8", "x-request-id": "req_715be6580ab5bf4eef8d2b0893926ec9"}, + 404, { - "content-type": "application/json", - "openai-model": "gpt-3.5-turbo-0613", - "openai-organization": "new-relic-nkmd8b", - "openai-processing-ms": "6326", - "openai-version": "2020-10-01", - "x-ratelimit-limit-requests": "200", - "x-ratelimit-limit-tokens": "40000", - "x-ratelimit-limit-tokens_usage_based": "40000", - "x-ratelimit-remaining-requests": "198", - "x-ratelimit-remaining-tokens": "39880", - "x-ratelimit-remaining-tokens_usage_based": "39880", - "x-ratelimit-reset-requests": "11m32.334s", - "x-ratelimit-reset-tokens": "180ms", - "x-ratelimit-reset-tokens_usage_based": "180ms", - "x-request-id": "f8d0f53b6881c5c0a3698e55f8f410ac", + "error": { + "message": "The model `does-not-exist` does not exist or you do not have access to it.", + "type": "invalid_request_error", + "param": None, + "code": "model_not_found", + } }, - 200, + ], + "Invalid API key.": [ + {"content-type": "application/json; charset=utf-8", "x-request-id": "req_7ffd0e41c0d751be15275b1df6b2644c"}, + 401, { - "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", - "object": "chat.completion", - "created": 1701995833, - "model": "gpt-3.5-turbo-0613", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "212 degrees Fahrenheit is equal to 100 degrees Celsius.", - }, - "finish_reason": "stop", - } - ], - "usage": {"prompt_tokens": 26, "completion_tokens": 82, "total_tokens": 108}, - "system_fingerprint": None, + "error": { + "message": "Incorrect API key provided: DEADBEEF. 
You can find your API key at https://platform.openai.com/account/api-keys.", + "type": "invalid_request_error", + "param": None, + "code": "invalid_api_key", + } }, ], - "You are a mathematician.": [ + "You are a scientist.": [ { "content-type": "application/json", - "openai-model": "gpt-3.5-turbo-0613", "openai-organization": "new-relic-nkmd8b", - "openai-processing-ms": "6326", + "openai-processing-ms": "1676", "openai-version": "2020-10-01", - "x-ratelimit-limit-requests": "200", - "x-ratelimit-limit-tokens": "40000", - "x-ratelimit-limit-tokens_usage_based": "40000", - "x-ratelimit-remaining-requests": "198", - "x-ratelimit-remaining-tokens": "39880", - "x-ratelimit-remaining-tokens_usage_based": "39880", - "x-ratelimit-reset-requests": "11m32.334s", - "x-ratelimit-reset-tokens": "180ms", - "x-ratelimit-reset-tokens_usage_based": "180ms", - "x-request-id": "f8d0f53b6881c5c0a3698e55f8f410cd", + "x-ratelimit-limit-requests": "10000", + "x-ratelimit-limit-tokens": "60000", + "x-ratelimit-remaining-requests": "9993", + "x-ratelimit-remaining-tokens": "59880", + "x-ratelimit-reset-requests": "54.889s", + "x-ratelimit-reset-tokens": "120ms", + "x-request-id": "req_25be7e064e0c590cd65709c85385c796", }, 200, { - "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTeat", + "id": "chatcmpl-9NPYxI4Zk5ztxNwW5osYdpevgoiBQ", "object": "chat.completion", - "created": 1701995833, - "model": "gpt-3.5-turbo-0613", + "created": 1715366835, + "model": "gpt-3.5-turbo-0125", "choices": [ { "index": 0, "message": { "role": "assistant", - "content": "1 plus 2 is 3.", + "content": "212 degrees Fahrenheit is equivalent to 100 degrees Celsius. \n\nThe formula to convert Fahrenheit to Celsius is: \n\n\\[Celsius = (Fahrenheit - 32) \\times \\frac{5}{9}\\]\n\nSo, for 212 degrees Fahrenheit:\n\n\\[Celsius = (212 - 32) \\times \\frac{5}{9} = 100\\]", }, + "logprobs": None, "finish_reason": "stop", } ], - "usage": {"prompt_tokens": 26, "completion_tokens": 82, "total_tokens": 108}, + "usage": {"prompt_tokens": 26, "completion_tokens": 75, "total_tokens": 101}, "system_fingerprint": None, }, ], - "Invalid API key.": [ - {"content-type": "application/json; charset=utf-8", "x-request-id": "a51821b9fd83d8e0e04542bedc174310"}, - 401, - { - "error": { - "message": "Incorrect API key provided: DEADBEEF. 
You can find your API key at https://platform.openai.com/account/api-keys.", - "type": "invalid_request_error", - "param": None, - "code": "invalid_api_key", - } - }, - ], - "Model does not exist.": [ - {"content-type": "application/json; charset=utf-8", "x-request-id": "3b0f8e510ee8a67c08a227a98eadbbe6"}, - 404, - { - "error": { - "message": "The model `does-not-exist` does not exist", - "type": "invalid_request_error", - "param": None, - "code": "model_not_found", - } - }, - ], "No usage data": [ { "content-type": "application/json", - "openai-model": "gpt-3.5-turbo-0613", "openai-organization": "new-relic-nkmd8b", - "openai-processing-ms": "6326", + "openai-processing-ms": "324", "openai-version": "2020-10-01", - "x-ratelimit-limit-requests": "200", - "x-ratelimit-limit-tokens": "40000", - "x-ratelimit-limit-tokens_usage_based": "40000", - "x-ratelimit-remaining-requests": "198", - "x-ratelimit-remaining-tokens": "39880", - "x-ratelimit-remaining-tokens_usage_based": "39880", - "x-ratelimit-reset-requests": "11m32.334s", - "x-ratelimit-reset-tokens": "180ms", - "x-ratelimit-reset-tokens_usage_based": "180ms", - "x-request-id": "f8d0f53b6881c5c0a3698e55f8f410ac", + "x-ratelimit-limit-requests": "10000", + "x-ratelimit-limit-tokens": "60000", + "x-ratelimit-remaining-requests": "9986", + "x-ratelimit-remaining-tokens": "59895", + "x-ratelimit-reset-requests": "1m55.869s", + "x-ratelimit-reset-tokens": "105ms", + "x-request-id": "req_2c8bb96fe67d2ccfa8305923f04759a2", }, 200, { - "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug", + "id": "chatcmpl-9NPZEmq5Loals5BA3Uw2GsSLhmlNH", "object": "chat.completion", - "created": 1701995833, - "model": "gpt-3.5-turbo-0613", + "created": 1715366852, + "model": "gpt-3.5-turbo-0125", "choices": [ { "index": 0, - "message": { - "role": "assistant", - "content": "212 degrees Fahrenheit is equal to 100 degrees Celsius.", - }, + "message": {"role": "assistant", "content": "Hello! 
How can I assist you today?"}, + "logprobs": None, "finish_reason": "stop", } ], - "usage": None, + "usage": {"prompt_tokens": 10, "completion_tokens": 9, "total_tokens": 19}, "system_fingerprint": None, }, ], "This is an embedding test.": [ { "content-type": "application/json", - "openai-organization": "foobar-jtbczk", - "openai-processing-ms": "21", + "openai-model": "text-embedding-ada-002", + "openai-organization": "new-relic-nkmd8b", + "openai-processing-ms": "17", "openai-version": "2020-10-01", - "x-ratelimit-limit-requests": "200", - "x-ratelimit-limit-tokens": "150000", - "x-ratelimit-remaining-requests": "197", - "x-ratelimit-remaining-tokens": "149993", - "x-ratelimit-reset-requests": "19m5.228s", - "x-ratelimit-reset-tokens": "2ms", - "x-request-id": "fef7adee5adcfb03c083961bdce4f6a4", + "x-ratelimit-limit-requests": "3000", + "x-ratelimit-limit-tokens": "1000000", + "x-ratelimit-remaining-requests": "2999", + "x-ratelimit-remaining-tokens": "999994", + "x-ratelimit-reset-requests": "20ms", + "x-ratelimit-reset-tokens": "0s", + "x-request-id": "req_eb2b9f2d23a671ad0d69545044437d68", }, 200, { @@ -509,14 +336,15 @@ { "object": "embedding", "index": 0, - "embedding": "SLewvFF6iztXKj07UOCQO41IorspWOk79KHuu12FrbwjqLe8FCTnvBKqj7sz6bM8qqUEvFSfITpPrJu7uOSbPM8agzyYYqM7YJl/PBF2mryNN967uRiRO9lGcbszcuq7RZIavAnnNLwWA5s8mnb1vG+UGTyqpYS846PGO2M1X7wIxAO8HfgFvc8s8LuQXPQ5qgsKPOinEL15ndY8/MrOu1LRMTxCbQS7PEYJOyMx7rwDJj+79dVjO5P4UzmoPZq8jUgivL36UjzA/Lc8Jt6Ru4bKAL1jRiM70i5VO4neUjwneAy7mlNEPBVpoDuayo28TO2KvAmBrzzwvyy8B3/KO0ZgCry3sKa6QTmPO0a1Szz46Iw87AAcPF0O5DyJVZw8Ac+Yu1y3Pbqzesw8DUDAuq8hQbyALLy7TngmPL6lETxXxLc6TzXSvKJrYLy309c8OHa0OU3NZ7vru2K8mIXUPCxrErxLU5C5s/EVPI+wjLp7BcE74TvcO+2aFrx4A9w80j+Zu/aAojwmzU08k/hTvBpL4rvHFFQ76YftutrxL7wyxgK9BsIevLkYkTq4B028OZnlPPkcgjxhzfS79oCiuB34BbwITTq97nrzOugwRzwGS1U7CqTgvFxROLx4aWG7E/DxPA3J9jwd+AU8dVWPvGlc2jzwWae57nrzu569E72GU7e8Vn9+vFLA7TtVbZE8eOCqPG+3Sjxr5/W8s+DRPE+sm7wFKKQ8A8A5vUSBVryeIxk8hsqAPAeQjryeIxm8gU/tuxVpoDxVXM250GDlOlEDwjs0t6O8Tt6rOVrGHLvmyFy6dhI7PLPxlbv3YP88B/YTPEZgCrxqKsq8Xh+ou96wQLp5rpo8LSg+vL63/rsFjqk8E/DxPEi3MDzTcw66PjcqPNgSfLwqnaK85QuxPI7iHL2+pRE8Z+ICOxzEELvph+07jHqyu2ltnrwNQMC82BL8vAOdiDwSqo88CLM/PCKFBrzmP6a85Nc7PBaM0bvh1VY7NB2pvMkF9Tx3New87mgGPAoKZjo+nS+/Rk/GucqwMz3fwYS8yrCzPMo56jyDHV08XLe9vB4+aLwXwMY8dVUPvCFATbx2eMC8V7NzvEnrpTsIxIO7yVmNu2lc2ryGQnM8A6/1PH/VFbySO6g80i5VPOY/prv6cyi7W5QMPJVP+jsyLIi84H6wPKM50DrZNIS8UEaWPPrIaTzvrmg8rcoaPRuQm7ysH9y8OxIUO7ss4zq3Od08paG6vAPAuTjYAI88/qmCuuROhbzBMK08R4M7u67+j7uClKa6/KedOsqNArzysM08QJ8UvMD8t7v5P7M799fIvAWx2jxiEi48ja6nPL0LFzxFkpq7LAWNPA1AQLyWlLO6qrfxvOGypTxJUau8aJ8uPceLnTtS0TG9omtgPO7xPDvzbfm7FfJWu2CqwzwAASk96FN4PLPgUbwRdhq8Vn9+PLk7wjs8NUW84yx9vHJCZjzysM079hodO/NbDL2BxrY6CE26OzpEpDv7DaM8y0quO41IIr1+Kte8QdMJvKlxDzy9+lI8hfyQPA3J9jzWmKS7z6O5u4a5vLtXKj088XzYO1fEtzwY4/e7Js1NugbCnjymxOu7906SvPSPAb1ieDO8dnjAu/EW0zp/b5C8mGIjvWTPWTwIxIM8YgFqPKvrZrwKpOA7/jK5O2vViDyfaXs8DR2Pu0AFGrvTc446IIOhvDreHrxRnTw8ROdbu55Gyrsht5Y8tVmAvHK5rzzZvTo8bx1QPMglmLvigBU8oIuDvAFYz7pblIw8OZnlOsTvPbxhzfS8BxnFOpkwE72E60w7cNp7utp6ZrtvHdC4uwmyO5dRX7sAm6M7kqEtvElRK7yWg++7JHanvM6ACDvrZqG8Xh+oupQsyTwkZWO8VzuBu5xVKbzEZoc7wB9pvA796zyZlpi8YbsHvQs+W7u9cZy8gKMFOxYDGzyu7Uu71KeDPJxVqbxwyI68VpDCu9VT67xKqFG7KWmtuvNteTocs0w7aJ8uPMUSbzz6cyg8MiwIPEtlfTo+wOA75tkgu7VZgDw8WPa8mGIjPKq38bsr0Zc7Ot4evNNiyju9C5c7YCENPP6pAj3uV8I7X3bOusfxIjvpZLy655bMvL9ivbxO3iu8NKbfPNe7VTz9ZMk88RZTu5QsybxeQtk7qpTAOzGSjTxSwO27mGIjPO7OC7x7FoW8wJayvI2uJzttxqk84H4wOUtlfbxblAw8uTtCPIO3Vzxkz9k8ENwfvfQYuLvHFNQ8LvatPF65ojzPLHA8+RyCvK3Kmjx27wk8Dcn2PARatDv3tBc8hkLzPEOz5jyQSoe8gU/tPMRmhzzp2wU90shPPB
v2oLsNQMA8jTdevIftMTt/Xsw7MMQdPICjBT012tS7SLewvJBtuDuevZM8LyojPa6HxjtOAd07v9mGusZXqDoPqKo8qdeUvETnW7y5occ5pOSOvPPkwjsDN4O8Mk85vKnXlDtp06O7kZDpO6GuNDtRFAY9lAkYPGHNdDx2Afc7RRtROy5/5LyUoxI9mu0+u/dOEryrYrC867vivJp29TtVbZG8SVGrO0im7LnhsqU80frfPL/IwryBT+07/+/kPLZ8sTwoNbg7ZkiIOxadlbxlnUm68RbTuxkX7Tu/cwG7aqGTPO8CAbzTYsq6AIpfvA50tbzllOc7s3rMO0SBVjzXzJm8eZ3Wu4vgtzwPDrA8W6b5uwJpEzwLtaQ81pgkPJuqarxmro288369u48WkjwREBU9JP/dPJ69kzvw4t27h3bouxhrBbwrNx29F9EKPFmSJ7v8px08Tt6rvEJthLxon648UYz4u61TUTz4lPQ7ERAVuhwqFrzfSjs8RRtRO6lxD7zHelm87lfCu10O5LrXMh886YftvL9iPTxCf/E6MZKNOmAhDb2diZ47eRSgPBfRCrznlsw5MiwIvHW7FD3tI807uG3SPE7eqzx1VY864TtcO3zTMDw7EhS8c+0kPLr47TvUDQm8domEvEi3MLruaAa7tUi8u4FgsTwbkBu6pQfAvEJthLwDnQg8S1OQO55GSrxZLCK8nkZKvFXTFr01dM+8W6Z5vO+u6Luh0eW8rofGvFsdw7x7KHK8sN5svCFAzbo/0SS8f9UVu7Qli7wr0Re95E4FvSg1ODok/907AAGpPHQhGrwtS++71pgkvCtazjsSzcC7exYFPLVZgLzZmom7W6Z5PHr0fLtn9O86oUivukvcRrzjPcE8a8REPAei+zoBNZ685aUrPNBg5bqeIxk8FJuwPPdOkrtUOZy8GRftO4KD4rz/72Q7ERCVu8WJODy5O8I5L7NZuxJECjxFkpq8Uq4AOy2fh7wY9Du8GRdtu48o/7mHdug803MOvCUQIrw2hZM8v+tzvE54pruyI6a6exYFvDXrGDwNQEA8zyxwO7c53TwUJGe8Wk9Tu6ouu7yqCwo8vi7IvNe71TxB04m8domEvKTkDrzsidK8+nOovLfT1zr11eM7SVErO3EOcbzqMqw74Tvcut4WRrz5pbi8oznQvMi/Er0aS+I87lfCvK+qdztd6zI83eJQPFy3vbyACQu9/8wzO/k/s7weG7e8906SPA3J9jw8NUU8TUQxPfEWU7wjH4E8J3gMPC72LTp6SJU8exaFOXBiibyf4MS6EXYaO3DIjjy61by7ACRaO5NvnTvMGB48Dw6wPFEUBr30j4E7niMZvIZC87s7EpS8OZnlPJZxgrxug9U7/DDUvNrxL7yV14e3E2c7PBdaQTwT8HE8oIuDPGIB6rvMB9o6cR+1OwbCHrylfgm8z6M5vIiqXbxFG1G8a9WIPItp7rpGT8Y838GEvAoK5jyAG3g7xRJvPPxBGLzJWQ28XYWtO85vRLp0IZq8cR81vc7mDb28PSe89LKyuig1uDyxEuK8GlwmPIbKgLwHGcW7/qkCvC8ZXzzSyE89F8BGOxPw8Tx+Ktc8BkvVurXiNryRkOk8jyj/OcKH0zp69Pw8apDPPFuUjLwPDrC8xuBeuD43KrxuYKQ7qXGPvF0OZDx1VQ88VVzNvD9rn7ushWE7EZlLvSL9+DrHi528dzXsu3k30bzeFka7hrm8vD3gAz1/Xsy80D20PNPZE7sorAG86WS8u2Y3xDtvHVC7PKwOO5DkAT3KOeo8c+0kvI+fyLuY61k8SKbsO4TrzLrrZqE87O9XvMkF9Tynb6q847SKvBjjdzyhSK88zTtPPNNzjjsvGV87UQPCvMD8t7stn4e7GRftPBQkZ7x4eiW7sqzcu3ufO7yAG3g8OHa0u0T4n7wcxJC7r6r3vAbCnrth3rg7BxnFumqQzzyXyCi8V8Q3vEPEqjyIu6E8Ac+YvGR6GLulkHY8um83PMqNgrv5pTi8N7kIPOhTeLy6TIY8B5COvDLGArvEzAy9IbcWvIUfQjxQ4BC7B/aTvCfwfrz15ie8ucR4PD1pursLtSS8AgMOOzIsiLv0srI7Q01hPCvRF7vySsg6O5tKunh6JTvCZCI7xuDevLc53btvLhQ8/pi+PJU9Dbugi4O8Qn/xvLpMhrth3ji8n/GIPKouu7tBS3y853MbPGAQyTt27wk7iokRO8d62bzZRnG7sN5svAG+1Lqvqve8JGXjur0Ll7tCf/E75/xRPIWFx7wgDNi8ucT4OZNvHb2nktu8qrfxuyR2J7zWh2A6juKcPDhlcLx/1RU9IAxYPGJ4szylB8C8qfrFO276HjuWcQK9QdOJvCUQIjzjo8a8SeslvBrCKztCf/E66MrBOx1eCz2Xt+Q66YdtvKg9mrrLSq47fFznO1uUjDsoNTg8QyqwuzH4Ejz/Zi67A8A5uKg9GrtFkhq862ahOzSmXzkMDEs8q+vmvNVkLzwc1n28mu0+vCbekTyCg+K7ekgVvO8CAT2yRtc8apBPu1b2R7zUp4M8VW2RvPc9zrx69Hw753ObvCcSB71sG+u8OwHQuv67b7zLSi65HrWxO0ZPRrxmwPq7t7CmPGxvAzygnfC8oIsDvKY7tbwZF+07p2+qvOnbhbv0oW47/2auuThlcDwIxIM8n/EIO6ijH7vHetk7uRiRPGUDT7pgh5I85shcPpGQabykShS7FWmgPPjojDvJ8wc8mlPEOY2uJzt7FoW7HNb9O7rVvDzKjQI80NcuuqvINbvNTBO8TgFdvEJ/cbzEZoe8SVGrvMvkqLyHdui7P2ufvBSbMDw0t6O82GaUPOLmGrxSNze8KVjpuwizPzwqjN48Xh8ovE4B3TtiAeo8azsOO8eLnbyO4py7x/GiPIvgNzzvi7c8BFq0O/dOEj1fU5282ZoJPCL9+LqyIyY8IoUGPNI/mbwKpGC7EkQKuzrN2jwVzyU7QpA1vLIjpjwi64s8HYE8u6eSW7yryLU8yK5OOzysjjwi6wu8GsIrOu7xPDwCaRO8dzVsPP/vZLwT3oQ8cQ7xvOJv0TtWBww8hlM3PBPeBDxT9OK71pgkPPSysrugiwO90GDlvHOHHz3xfNg8904SPVpglzzmP6a7Cgrmu9/BBLyH7bG85QsxvVSfIb2Xt2Q8paG6vOqYsTos9Mi8nqxPu8wHWjuYhdS7GAWAvCIOvTp/bxA8j7CMPG1P4Dxd67I7xxRUvOM9wbxMhwU9Kp0iPfF82LvQYOU6XkJZPBxNx7y0nX28B5COO8FT3rp4eiW8R/oEvSfw/jtC9rq8n/GIux3nQTw8WPY8LBf6uzSmXzzSPxm88rDNvDysDjwyPnW7tdFyPBLNwDo8WHa8bPi5vOO0CrylGAQ8YgFqvEFLfDy7LOO7TIeFPAHPmDv3YP+6/+9kPBKqjzt5rpo8VJ+hvE7eKzyc3t88P2sfvLQUR7wJ1vC6exaFvD6dr7zNO888i+A3ulwuhzuF/JC8gKMFveoyLLxqBxk7YgFquws+2
zwOUYS8agcZvGJ4M71AjtC747QKvAizP73UH3a7LvatPJBtuLzEzIy8bG8DvJEHM75E59s7zbIYPObZIL2uZJW7WRveugblTzy6TIa802JKvD9rH7xlA088QAWavIFP7bwL2FW8vqWRu0ZgijyRkGm7ZGnUvIeHLD1c2m48THbBPPkcAr1NzWc8+JT0uulkvLvXMp+7lU96u7kYET1xhTo8e3wKvItGPTxb+hG87mgGPWqhk7uhrrQ73rBAPCbNTT13rDW8K8DTus8s8DsNt4k8gpQmPLES4ryyvSA8lcbDO60woDyLVwE9BFq0u+cNFj3C7Vi8UXoLPDYOyryQ0z083+S1Ox34hTzEzIw7pX4Ju6ouuzxIpmw8w5iXuylYaTy5sgu9Js3NOo+fyLyjFp+8MMSdvOROBb2n+OA7b7fKOeIJzDoNpkW8WsYct7SdfTxXxLc7TO2KO3YB9zynktu7OkSkPKnXFLvtRv47AJujuzGSDT0twjg8AgOOO4d26DvpZDy8lAkYPI5r0zcGS9W8OGXwu9xIVjyH7TG9IUDNuiqMXrwb9qA79I+BPL1xHLuVPY07MOfOO0ztCruvMoW8BuXPu4AbeLyIRNg8uG3SPO5XQjuFH0K8zm9EPEAoSz0tKL652ZqJOgABqbwsjsM8mlPEPLewpjsVWNw8OGXwOlYHjLzfwQQ81iFbOyJ0Qj3d85S7cQ7xvIqswjxKhSC7906SvAFYz72xiau8LAWNPB1eCz09jGu72ZoJPfDiXTwPDrA8CYGvvNH6XzxTa6y8+RwCvY8of7xxDnG8Ef/QvJ9p+zqh0eU8a16/OzBN1LyDLiE9PFh2u+0jTbxLUxA9ZZ3JvItXgbqL4Dc8BuXPvKnXFDzmPyY8k/hTOlum+bqAksG8OZnluPmluLxRnTy6/KcdvKAUOrzRcSm8fqEgPcTeebzeOXc8KCR0OnN2W7xRA0K8Wsacu+M9wToyLIi8mTATu21P4LuadvW8Dtq6vPmlODsjqLe88ieXPJEHszySoa08U/RiPNQNCbwb9qC8bG+DOXW7FL0OdLW7Tc3nvG8dULsAJNo7fNMwO7sJMr2O4hy85ZTnuwAkWjw+Nyq8rcoaO+8lsrvx86E8U/TivGUUkzp6SJW8lT0NvWz4uTzeFka6qguKvIKD4rt/1ZU8LBf6vD6dr7es/Ko7qWBLvIlVHDxwUUU6Jt4RvRJEijnRcSk88235PGvVCL3zbfm8DaZFO+7xvLs3qES8oznQO9XKNDxZLKK8IIMhvComWb0CAw48fDk2O+nbBb29C5e8ogVbu1EUBryYhdS7OTPgOul1AD25sgs7i1cBPBYmzLtSroA8hfyQvP3bErz9h/o82ZoJO7/ZhjxtT+A8UZ28uzaFk7wJ1nA6dd7FPGg5Kbwb9iC8psRrvBXyVjzGRuS8uAfNu0+smzvFAAK96FN4vC2fhzy65oC7tgXou/9mLjxMELw8GSgxPRBlVjxDxCq80j8ZveinkDxHgzu70j8ZvPGNnDyPn0i8Vn9+urXR8ju10fI7sRJiPDBemLt8OTa8tJ39O4ne0rsaXKa7t0ohPHQhGrdYXjI824sqvDw1RT2/2YY8E/BxPIUOfjv9dQ08PM8/PMwYHrwwXpi7nqxPPM8aA7w+wOC7ROdbO79iPTxVbRE8U45dPOOjRjxwYok8ME1Uu1SfIbyifKQ8UXqLPI85wzsITTq8R+lAPMRVQzzcv58892B/Oqg9mjw3MXu7P9EkvM6AiLyx7zA8eHolPLYWLLugFLq8AJsjvEOzZjk6RKQ8uRgRPXVVjzw0HSk9PWk6PLss47spzzK93rBAvJpTxDun+OC7OTPgvEa1yzvAH+k5fZDcOid4jLuN0di8N7kIPPe0F7wVaSC8zxoDvJVgvrvUpwO9dd7FPKUHQLxn4oI7Ng7KPIydYzzZRvE8LTkCu3bvCTy10fK7QAWaPGHeOLu6+O27omvgO8Rmh7xrXj87AzeDvORg8jnGRuS8UEYWPLPg0TvYZpQ9FJuwPLC7O7xug1U8bvoevAnW8DvxFtM8kEoHPDxYdrzcWZq8n3q/O94nCjvZI0C82yUlvayWpbyHh6y7ME1UO9b+KTzbFGG89oCiPFpgFzzhTKA84gnMPKgsVjyia+C7XNpuPHxc5zyDLqG8ukyGvKqUQLwG5U88wB/pO+B+ML2O4py8MOdOPHt8irsDnYg6rv6PumJ4szzuV0I80qWePKTkDj14A9y8fqEgu9DXLjykbUU7yEhJvLYFaLyfVw68", + "embedding": 
"/PewvOJoiTsN5zg7gDeTOxfbo7tzJus7JK3uu3QArbyKlLe8FL7mvOAljruL17I87jgFvDDiBTqEmZq7PQicPJDQgDz0M6I7x91/PMwqmbxwStq7vX6MO7JJdbsNk+27GWEavNIlNrycs5s8HYL1vPa5GDzuOIW8gOPHOy5eXrxUzAK8BlMGvb8Z8bvoqPA5+YIKPEV2EL2sTtg8MSfQu6/BLzyhAgS7cEgLO+MD7ryJUTy7ikBsOz6hsTeuKJq86KYhvIrqUTyhrrg8hdyVu4RDAL1jzik7zNZNO0JZUzzFqQW7dplCPHwrpjtA0Y287fWJvK/BrzzCNi68/9PBO7jZCbwfBp26vDuRO7ukSjxX6448nLMbPLv65Dz7Xps8A4qUu4d1K7q1vMw87V7DutssQLwSjLu7Rg8mPPg/DzyKlLc6AbDSvLw9YLx3Mtg8ugu1OUmXa7szA2G8ZgDVPGkdEryNB4+5DxcVPAv4iLod1sA7UkjbO+osGLyrC908x4WWu2v5ojzBSU08QllTvHpR5Lu/w1Y7oNT2ulhBKbwfsAK90YwgvISZmjp/oEy8FL7mPAGugzxeK/a7rigauXGLBrwG/zq9xuwAO5EVSzzoUlY7P5DhvL+wN7xiN2O7rKTyPEy29zyVdQM8e5KQvDsu2jwYHp+5NJz2uw8XFb2olra8ul+AvJDl7jvOsI88b1uqPKkvTDzJY/a8wAbSPDF7m7yOoKQ87+Q5vaxOWLwILxc8ZkGBPExejry6tRq8SZfru9wZoTzUqyy64mrYOnyBwDusoqO8X2yiOfteG7sxfWq6Zqo6PFEDkbsZCwA9weATPKZkC7wbUMq87Uuku8PPQ7o3Y5k8rPg9vCWYALwusqk8gxXzPIwaLjycsxu62pMqPGl1e7xN96O802ixPOnpHL2lIRA88/L1Oq9rFbv1eOw7GvqvuwyRnry4hb68gY/8vFOJhzx7kpA8xBI/PPTdB7zz8Ka8g6w5PMuT0rs2zFI77AipvLJJ9TyKQOw8jkoKPKykcjoPbS+/fNfauWFIMz1O5IS8N7mzPAfu6jwWRF08UzW8vCzYZ7xdPMY8BM0PvFBsSrzbLMC8GN1yvNwZoTvFqYW7sfGLuzsu2rwv93M8iLr1PPf8E7znY6Y8U4tWPBHzpbuNXSm7z/MKPMbsADwp+oe8/PewPCzY5zoYyIS8Mr6WPHMmazzGRGo85EQaPZyzm7wLt9y8jkoKO+732DqZlt48Bv+6vM9cxDj4P488jMSTugGug7xLca08m8Y6uyd0kbtt1bO6s4qhOgw7BLwsgs08SxsTvHJ6trvxarA796jIvNuC2jwD4C48n9KnPMwqGTxHUqG7n3yNPHaZQrw+obG6QWzyvKa6pTyHdau8eqUvPVrHnzsIhTG9UQVgPIYyMDtRW/q7YFtSu3aZwjzOBio9pXl5PMAGUrwl7hq8ul+APLdCwzuqcke81K17vNMUZjyqcsc7ZpcbO/OaDL2V3rw6s+C7O4NWnzvKpKI8tqmtO/QzIr0eb9a8mtcKvAlyEjxac1Q8yMiRPHZF9zymuqW7cTe7uxcxvrsqpjw8v8PWO/XMtzzhffe7uIU+uuMBnzxD8ui7gDeTvKjqAb1P0zS8vSrBu/hB3jondJG8C04jvVkwWTxgWYM8WwxqPGjcZbz3/uI7m8a6O4K9iTw/5ns87LKOu19sIrvzmow63BmhvOimIbys+D08jMZiuycgxrvTEpc8G/x+vPz3sDyz4Ds8ebhOPBS8l7vNbRQ8fluCvByTxbqIYow8HSzbOmtPPbwFaPS81yDTOglyEr3k8E47B+5quopAbLs6Qfm4VbuyOyGOYrsv9aQ7x9swvLfsKLx9cPC7aranvIhiDDs8xaC8riiaug/DyTwPGWS8mKl9u6tfKLznDYw7cyZrvN5e6zzAnZi8XxYIvfJZYLvGQpu8W7QAO2HyGDylzUS7Kj2DPPiVqbyZlI+8HdbAu5y16ryPj1S7OKaUugUQizpwSlo7YosuPKf/bzxGDyY87fUJPD2ygTp279w7+dgkuxkLgDzc2PS8ghMkPB2C9btml5s7U98hvETfyTttf5k74CUOPOmTAj1pycY7PHHVuqFYHjutO7m6wUnNvAykvbxpcyy8EFzfPOhSVjx6+8k80PVZu2LhyLzbgto7ABe9O598jTzjA+67a/kiPAUQC7ytj4S8tWayvJtwIDsWmKg8RXYQOYY0f7xYLgo8ybdBPLM2Vjy429g8MDggvYqUt7uPj9Q8Vv6tPOK+ozxBbHI8cs6BvE99mjyIYgw84X33PMs9uDt/9Bc8WYbzPMGf5zzKToi8eMvtPFOJhzww4gU9YZ5NPAyRnrsFvL88/uZgvDZ2ODsyass7reUePAZTBj2429i7PqGxvFR4tzuqxpI8QWojPZ7lxjseb9Y7mKl9ur3UpjpFzKo88RSWvMkNXLyvKuk54CWOvBAGxTvjq4S8E882vN/ikjuViKK72LnoO34HNzubGgY9lw4ZPIi6dTzUrfs7eHVTO/2j5byANxM9YZ5Nuy0ZFLz897C8OajjvL8Z8Tsb55C82VCvO6Tg47lGD6Y8UQXgPEwKw7wSOPA7elHkPD6hsTy/sLc7TaGJO1DAlbztXkO6lTTXuzF96jvCOP26Ff+SPPFs/7thns268llgvD1etrxWZ+c75PDOO5U0VzyXDpm8BRLau2AFuDxco7A8jV/4u7ZTEzyOoKQ8xLwkPK8qabyrCY68Bby/u4cfkTyj3hQ9OuvePBX/kjtdkuC7pODju62PBLwfBh29/icNPEYPJruhWB48h3WrvJV1g7xEia88dkX3u4rqUTwiJ/g7tRAYunNnF7ynUzs8JFdUO3zVC7wFElq8QH3Cu9GO77rXdB48TjzuvFM1PDy8PeA6QNGNOr1+DL0CR5k7TjqfPDosC7zc2PQ4EuAGvDimFD0sgs07H7LRPP46rDxvW6o6t5hdO/z3MDwV/xK8+dgkPN5e6zu42Qm8uRyFvJnqKbp81Qu7ld68u1yjsDzenxe6sp3AvIMAhbxrowg8aR2SOw/DSbxHUiG8xzFLvJ32Fr3wfc+8e+p5vAJJ6Lt/9ua8M63GvBbuwrx9cHC8NyJtvNaHvbqIuCa8ek8Vu1JGjLxh8hi97jgFvTNXLDr/Kdw77AipPEPwGbxr+/G7EfMlvB+y0TuIDsG7oQIEPJDQgLyHH5G7qh58PO97gLuIuvU6vheiuoxwSLx2mcI8pc1EPAGuAzvujp+868UtPIY0/7oZYRo88WqwPFfrjrvGQpu8YLHsO85v47wQXF87pSGQu+k/NzyziqE5QNNcuwBrCDzYt5m87k3zOnYwibybxjq8y+lsu8WpBboCSeg8wyMPvNGMILzrb5M8L/dzvMgerLuqHK26GMgEvCBJGDyUm0E8skl1O1jt3TwUvma8bOhSuzwbu7ya1wo8wozIvL/D1jz5goq8TuSEvMMjD7wYytO8aranvE2j2DohjuI7VHg3O6f/b7wUErI7f6DMuldUSLw8G7u8B5jQvKrGEr1LHeI8lJ
vBvHvqeTthSDM8rtROPGtPvbzbgAu9CIUxO6Pxs7xUeLe8qsaSPAsN9zxYl0M8c70xPWBbUryK6II8k+8MPDe5MzrNbZQ8UQMROSMSirzGmLW6f/QXO3XtjTz/08G7HSxbO2v5ojtbChs82VCvPNzDBr3cw4Y7c2cXvC/387tLG5O80xTmPGZBgbxNo9g77zrUvAPgLrxBFAm5Wh06PLKdQDzc2HQ8iuiCPFsM6rtWEc06Nna4O+6OH7w8bwa8s+A7vDRGXLxac1S8a6OIPC/387ozrcY8TuSEvP2j5Tw6QXk76KhwPGfaFrxqYA28pXeqOy9LP7qXDpm8he80vasJDr0FZiW852OmuqGuuDyMxuK8sUcmPIRDgLwDNsm7hjT/u4chYDyQ0k895TNKO0Fs8jxBFtg8EZ/auvXMN7zGROo8BqkgOis/0joD4v08ebjOPHzVi7xEia+8EFzfuKV3KrymuqU7zrCPvKTgYzyNBw889mXNvL4XorumZlo7xzFLvd5e6zr1dp28eMvtu1TO0bylzUS7yvq8vGzmAz0yasu8Q0a0PGI1FLvsx/y7DKS9u2nJxjsjFFm7bwUQO3LOAT0H7uo8KA0nvPIDxrtlvVk8JK3uO7b/x7qP4588pmZavEcR9TwcPau8cEiLvHZFdzw4/K48IPVMPGpgjTshjmI7lJvBvKdTu7v03Ye7YLHsPFDCZLw2IB67jMbiu/SJPLzmIno8nUyxu3HhoLylIZC7kwT7vMCdmLsfXLc7u6TKuvB9zzxqtie8Ses2vMgerDxT36E8AkeZvNd0Hrtk0Hg8+7Q1PJDQgLuDrDm8WC4KPGl1e7xl/oU8Is+OvK8q6brnDQy9A4oUvDTwQTy7+BW7SxuTvO97gLwWmCi8t+53PJYhuLun/SC8DxcVO6Zki7vB87I7jMZiPDYgHrviFL46TaNYujzFIDsX2yM7wyXevMMl3rtWqBM8Wdq+PIF6Drt+W4K8VOHwvI+NhbsYdDm8F4WJPHgfubuqHny8WwobPF08xjuGNP86Ff8SO1kw2byiWm27PMdvvJe6zbr5l/i8y+nsukV2kLtU4fA7AbBSPD86x7x3Mti8sfELOpbLHb1819q8fXDwu73UJrw2zFI6KpOdPADDcbx6TxU9Oy5aPMHzsjz5Lr+8M63GOzSaJzuzdwK9AGuIvJtwIDz+kMa8WYQkvHeGIzss2Oc6FavHO6ZkCz0Y3fI69XhsvL1+jLqej6w7bYHoOy+fijvv5Dk8Y86pu/3kETwtby671yDTNxN5HLuoQBy8DJGeO5dksznq2Ew8wZ/nvOA4LTxXAH2880ZBvMjIkTy7+uS7CC8XvJDQAD0Abdc8MmpLu7znRbxs5oM8OemPvEOczryNX3g7MXubvOhQB70fCOy8DT3TujzHb7xWEU250iW2O4DjR7zPCPm7XimnPE7kBDwqUvG846sEvGfttbxU4fA7e+iqvO97gLtWZ2c7VqiTuZWKcTxCV4Q8xakFO2v5Irv4Qd47njmSPIQCVLo/jpI8TGBdPsZEarwH7Bu7BqmgPP4njTuUMgg8s3eCOEXMKjsMO4S7GQsAPBKMuzzdBgI87UskulBsSrtWqBO8du9cvNYzcrwS4Ia8XearvLyRq7xOPO67GB6fvJG/MDwX26O8A4qUPDF7G7xP0zS8lxDouy9LPzxGeN88iLgmvP8p3DvGROo8k+8MOx8GnbzF/5+74r6jPGySODzLPbg8Z+21Oz+OEj1sPJ68vX4MPH5bArvheyg8U4kHPIuBmLyNCV67IxIKuzsu2jxGDyY7he80vFKcpjzbgIs8BHlEu0xgXbz7tLU8Gg1PO/g/jzyT7wy8xpg1OuIUPjxEMxW89XhsPP2jZbzQNoY8GN3yvHm4zjsRnQs8K+k3PAw7BDwUvua7cJ4lPAFauLs2ygO9/aPlvINWHz3W3dc83+ISPcCdmDzb1qW7ryrpuwGuA7w+obG8kb8wvYn7Ib0PGWQ8cTe7vCfKqzotxci8cY1Vu0sdYju0edG7hEOAvJmW3jpdkBE8sfGLPJOu4DxP07Q7hAJUvAphwrynpwY9X2wiPZmW3rtc+co6iGRbPKpyx7xXAH28n3yNOwJJ6LqCEyS8TuQEvbDD/jt9xLu8pmSLu3aZQjwLDfc8wjj9u3uUXzz8oRa8SoTMvF7TDDwdgnW7WYZzPO6hvjrEvnO8TpC5vDosC7xPJwA88NNpvGl1ezwi0d27cYuGPG1/mTvw0+m6UMJkPGI1lDsxe5s89DOivMgeLDyTruA8PMUgvEvHR7xRW/q6+sWFvKQ0r7y6Yc88jBouujWHiDvyV5G8mxoGvSfKK7zenxc7bYFouxGf2jyK6IK8hJkavKPxM70xJ9C7F4UJvAW8P72ySXW7jBquPPpxurznDYy8QlcEvMHzMr4S4tU7opsZPNGMIL2DVp+7H7LRug6ATjxTiYe8FGhMvI/jH7wxJ1A8riiavA2T7byJp1a8sfGLu6ZkizyQ5W67cY3VvNSrLD1OPG48nyjCPD2yAb3YuWg8nLXquhh0ubsO1Jm7aXV7u12QET0kATo8cEgLvDUzPTwV/xK8cYsGPVxNlrssLLM70J8/POTwTj3dsja8fl3Ruh8I7Dua14o8EjYhPIzG4rxHUiE8+OvDOzA4oDwxJQE9T9O0u/yhFj1lvVm84mgJPNO+y7wjvj48hjKwO9zDhjymZIs7mZQPu1odujwkrW48c2eXu4WbaTxGuQu95w/bOhtQyrz6G6C8oVievMWpBb0+TeY7H7ACOkIDuTrIdEa83VwcuEWLfjxBwL07iGKMOwsN9zxket67+dikPPf8E7tmQQE8ifuhuzREDT1mqjo8fRiHO5cQ6DsG/zq8r2sVPPkuPzinqdW8QWzyu9bdVzwUEjK9dzLYuiLRXbyJ+6E79SCDPOREGrvOsI87aIbLO9ceBLu5HIW82GPOu0cRdbx3Mtg81yDTPPSJPDtGZUC8/pBGPMcxSz1BwD257fWJOi6yqbyBJsM87V7DPC6yqTte1ds8uXTuOrHxi7yzdwI8HSxbO3aZQj1LG5O7a/vxvOHRwjyKPh27tlOTvE8pz70nyqu8t5aOPKZkCz0aY2m7uNkJPff+YjzxarA891KuvNo/Xzy2qa28s3cCvWZBgbxmVm+8xu7PvLkcBTvTFOY8Xn/BO5Z30rwSNiE9pXl5u/ZlTbxFdhA9RN/JvIzGYrpgBTg85PDOvN/iEjymuiU830tMOhjd8rq9KsG8rPg9udfKuLz1dh26KpMdvB4ZPLx1Qyi80YwgPb2Terxk0Hg8rZFTOjRGXLyfKEK8ZVSgux3WwDpBFIm8xakFuz+Q4bsFaPS8s+C7vLPgOzvpP7e8JjGWPIvXsjwh4q08zm9jPO31Cbwew6G8a089Oa9rFb1Vu7K76y7nvABtV7seb9Y7mKcuO9/1Mb1OOh+80xTmu/OcWzzakyq8LNYYOz6hsbvKpKI89/7ivOhQhzqRaZa8k+8MvQb/ujwAwSK6WC6KvLeY3bvZ+pQ85iL6vC4IRLnNw647dBNMv
FQiHTz+5mA61FUSvcVVujgiJSk8OkH5PBeFCb2lefm8YyREO7Pgu7tYl0O8qOzQO5B8NTy+F6K8KVAivGW9Wb0izw48c70xO9A2Br2pg5e8cEpau18WCLxac9S77bTdOmZBAT3xFBY7kNAAPFYRzbu6X4A8wyOPvLZTE7y9k/o8RrkLOwZThjy8PeA8oa64uwOKlLy9k3o6vOfFPBw9K7w8xSC8YLFsvDDkVDwmM+W8IPXMux8GnTvdBgK9+Zd4vPTdhzyww367JjPlu51MMTxIqLs8PqExPbkeVDz4lSm8opsZvbCukDxZ2j67VWUYvH6xnDw/Oke8U4tWun1w8DvEvvM7JjNlPH/0l7uWITi8kNAAPKjs0LteKae7ifshPBGdCzlVuzI8LrIpvLFaRT1xi4Y86KhwPFylfzvt9Qk82yxAPAEEHrxD8Jm70ntQPD2yAbwQXN+71ZpcOzUzPTzyVxE8svNaPOsbSDz5goo8cY1Vu7OKIbyOoKQ8ZLuKPJqDvzsG/zq8phBAPF08RjwYHp88njmSOoSZmjyzd4K7L/UkvIK9ibzkmjQ8jqAkPGQRJbtaHbq8uC8kvAb/ujlwnqU8vDsRPVEDkTyNXSk9s+A7PP2j5bv2DzO9vSpBvEUixTu2VeK7KHbgvJEVyzuww345bOjSOpPvjLusTti8RrkLPPa5GLzjAR+8iugCvHyBwLv1IAO9h8vFPL0qQbxO5IQ7OVLJPPJZYDy/GfE8NEQNu2S7CjyySfW7YfKYPKQ0L7tgsey7elHkO+hQh7zYDTQ7YFmDvPrFBTq7+uS8o94UPKHB1zvNbZQ9qdmxPF/CPLwkV1Q8YK8dvDzH7zuiBNM8rEwJPOYierzGQpu8a0+9O8pOCDsvSz+8O4Ilvaa6pbz3Uq67umFPO6V3KjzUV2G8Nd2iPK9rFTybcKA8xzHLPGYAVTyXEOi7a/txPBS+5jy+F6K83MOGvL0qQbytkVM8LNjnO1yjML3dXJy88H1PPLHxi7vQNoY6bwWQuvYPszzzRkE8BqmgPFfrDj29gNu8Wscfu2KLLjxmqjo7hohKvK8qabzVmA28", } ], - "model": "text-embedding-ada-002-v2", + "model": "text-embedding-ada-002", "usage": {"prompt_tokens": 6, "total_tokens": 6}, }, ], } + STREAMED_RESPONSES_V1 = { "Invalid API key.": [ {"content-type": "application/json; charset=utf-8", "x-request-id": "req_a78a2cb09e3c7f224e78bfbf0841e38a"}, diff --git a/tests/mlmodel_openai/conftest.py b/tests/mlmodel_openai/conftest.py index 0a9f531e1..d0eb4cf8a 100644 --- a/tests/mlmodel_openai/conftest.py +++ b/tests/mlmodel_openai/conftest.py @@ -198,7 +198,7 @@ def _wrap_httpx_client_send(wrapped, instance, args, kwargs): # Send request response = wrapped(*args, **kwargs) - if response.status_code >= 400 or response.status_code < 200: + if response.status_code >= 500 or response.status_code < 200: prompt = "error" rheaders = getattr(response, "headers") diff --git a/tests/mlmodel_openai/test_chat_completion_error_v1.py b/tests/mlmodel_openai/test_chat_completion_error_v1.py index dc1cdcfb7..18c2bb7da 100644 --- a/tests/mlmodel_openai/test_chat_completion_error_v1.py +++ b/tests/mlmodel_openai/test_chat_completion_error_v1.py @@ -282,7 +282,7 @@ def test_chat_completion_invalid_request_error_no_model_async_no_content(loop, s ) @validate_span_events( exact_agents={ - "error.message": "The model `does-not-exist` does not exist", + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", } ) @validate_transaction_metrics( @@ -322,7 +322,7 @@ def test_chat_completion_invalid_request_error_invalid_model(set_trace_info, syn ) @validate_span_events( exact_agents={ - "error.message": "The model `does-not-exist` does not exist", + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", } ) @validate_transaction_metrics( @@ -361,7 +361,7 @@ def test_chat_completion_invalid_request_error_invalid_model_with_token_count(se ) @validate_span_events( exact_agents={ - "error.message": "The model `does-not-exist` does not exist", + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", } ) @validate_transaction_metrics( @@ -403,7 +403,7 @@ def test_chat_completion_invalid_request_error_invalid_model_async(loop, set_tra ) @validate_span_events( exact_agents={ - "error.message": "The model `does-not-exist` does not exist", + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", } ) @validate_transaction_metrics( @@ -543,3 +543,397 @@ def 
test_chat_completion_wrong_api_key_error_async(loop, monkeypatch, set_trace_ max_tokens=100, ) ) + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Missing required arguments; Expected either ('messages' and 'model') or ('messages', 'model' and 'stream') arguments to be given", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_invalid_request_error_no_model_with_raw_response", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_no_model_error) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model_with_raw_response(set_trace_info, sync_openai_client): + with pytest.raises(TypeError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + sync_openai_client.chat.completions.with_raw_response.create( + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@dt_enabled +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Missing required arguments; Expected either ('messages' and 'model') or ('messages', 'model' and 'stream') arguments to be given", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_invalid_request_error_no_model_no_content_with_raw_response", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(events_sans_content(expected_events_on_no_model_error)) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model_no_content_with_raw_response( + set_trace_info, sync_openai_client +): + with pytest.raises(TypeError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + sync_openai_client.chat.completions.with_raw_response.create( + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Missing required arguments; Expected either ('messages' and 'model') or ('messages', 'model' and 'stream') arguments to be given", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_invalid_request_error_no_model_async_with_raw_response", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_no_model_error) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model_async_with_raw_response( + loop, set_trace_info, async_openai_client +): + with pytest.raises(TypeError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + loop.run_until_complete( + 
async_openai_client.chat.completions.with_raw_response.create( + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@dt_enabled +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Missing required arguments; Expected either ('messages' and 'model') or ('messages', 'model' and 'stream') arguments to be given", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_invalid_request_error_no_model_async_no_content_with_raw_response", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(events_sans_content(expected_events_on_no_model_error)) +@validate_custom_event_count(count=3) +@background_task() +def test_chat_completion_invalid_request_error_no_model_async_no_content_with_raw_response( + loop, set_trace_info, async_openai_client +): + with pytest.raises(TypeError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + loop.run_until_complete( + async_openai_client.chat.completions.with_raw_response.create( + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_invalid_request_error_invalid_model_with_raw_response", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_invalid_model_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model_with_raw_response(set_trace_info, sync_openai_client): + with pytest.raises(openai.NotFoundError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + sync_openai_client.chat.completions.with_raw_response.create( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + ) + + +@dt_enabled +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_invalid_request_error_invalid_model_with_token_count_with_raw_response", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(add_token_count_to_events(expected_events_on_invalid_model_error)) 
+@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model_with_token_count_with_raw_response( + set_trace_info, sync_openai_client +): + with pytest.raises(openai.NotFoundError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + sync_openai_client.chat.completions.with_raw_response.create( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + ) + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_invalid_request_error_invalid_model_async_with_raw_response", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_invalid_model_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model_async_with_raw_response( + loop, set_trace_info, async_openai_client +): + with pytest.raises(openai.NotFoundError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + loop.run_until_complete( + async_openai_client.chat.completions.with_raw_response.create( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + ) + ) + + +@dt_enabled +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "error.code": "model_not_found", + "http.statusCode": 404, + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_invalid_request_error_invalid_model_with_token_count_async_with_raw_response", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(add_token_count_to_events(expected_events_on_invalid_model_error)) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_invalid_request_error_invalid_model_with_token_count_async_with_raw_response( + loop, set_trace_info, async_openai_client +): + with pytest.raises(openai.NotFoundError): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + loop.run_until_complete( + async_openai_client.chat.completions.with_raw_response.create( + model="does-not-exist", + messages=({"role": "user", "content": "Model does not exist."},), + temperature=0.7, + max_tokens=100, + ) + ) + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + "error.code": "invalid_api_key", + }, + }, +) +@validate_span_events( + 
exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_wrong_api_key_error_with_raw_response", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_wrong_api_key_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_wrong_api_key_error_with_raw_response(monkeypatch, set_trace_info, sync_openai_client): + with pytest.raises(openai.AuthenticationError): + set_trace_info() + monkeypatch.setattr(sync_openai_client, "api_key", "DEADBEEF") + sync_openai_client.chat.completions.with_raw_response.create( + model="gpt-3.5-turbo", + messages=({"role": "user", "content": "Invalid API key."},), + temperature=0.7, + max_tokens=100, + ) + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + "error.code": "invalid_api_key", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", + } +) +@validate_transaction_metrics( + "test_chat_completion_error_v1:test_chat_completion_wrong_api_key_error_async_with_raw_response", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + background_task=True, +) +@validate_custom_events(expected_events_on_wrong_api_key_error) +@validate_custom_event_count(count=2) +@background_task() +def test_chat_completion_wrong_api_key_error_async_with_raw_response( + loop, monkeypatch, set_trace_info, async_openai_client +): + with pytest.raises(openai.AuthenticationError): + set_trace_info() + monkeypatch.setattr(async_openai_client, "api_key", "DEADBEEF") + loop.run_until_complete( + async_openai_client.chat.completions.with_raw_response.create( + model="gpt-3.5-turbo", + messages=({"role": "user", "content": "Invalid API key."},), + temperature=0.7, + max_tokens=100, + ) + ) diff --git a/tests/mlmodel_openai/test_chat_completion_v1.py b/tests/mlmodel_openai/test_chat_completion_v1.py index 5b7c294b0..cf0c4f849 100644 --- a/tests/mlmodel_openai/test_chat_completion_v1.py +++ b/tests/mlmodel_openai/test_chat_completion_v1.py @@ -51,24 +51,21 @@ "llm.foo": "bar", "span_id": None, "trace_id": "trace-id", - "request_id": "f8d0f53b6881c5c0a3698e55f8f410ac", + "request_id": "req_25be7e064e0c590cd65709c85385c796", "duration": None, # Response time varies each test run "request.model": "gpt-3.5-turbo", - "response.model": "gpt-3.5-turbo-0613", + "response.model": "gpt-3.5-turbo-0125", "response.organization": "new-relic-nkmd8b", "request.temperature": 0.7, "request.max_tokens": 100, "response.choices.finish_reason": "stop", "response.headers.llmVersion": "2020-10-01", - "response.headers.ratelimitLimitRequests": 200, - "response.headers.ratelimitLimitTokens": 40000, - "response.headers.ratelimitResetTokens": "180ms", - "response.headers.ratelimitResetRequests": "11m32.334s", - "response.headers.ratelimitRemainingTokens": 39880, - "response.headers.ratelimitRemainingRequests": 198, - "response.headers.ratelimitLimitTokensUsageBased": 40000, - "response.headers.ratelimitResetTokensUsageBased": 
"180ms", - "response.headers.ratelimitRemainingTokensUsageBased": 39880, + "response.headers.ratelimitLimitRequests": 10000, + "response.headers.ratelimitLimitTokens": 60000, + "response.headers.ratelimitResetTokens": "120ms", + "response.headers.ratelimitResetRequests": "54.889s", + "response.headers.ratelimitRemainingTokens": 59880, + "response.headers.ratelimitRemainingRequests": 9993, "vendor": "openai", "ingest_source": "Python", "response.number_of_messages": 3, @@ -77,17 +74,17 @@ ( {"type": "LlmChatCompletionMessage"}, { - "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug-0", + "id": "chatcmpl-9NPYxI4Zk5ztxNwW5osYdpevgoiBQ-0", "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", - "request_id": "f8d0f53b6881c5c0a3698e55f8f410ac", + "request_id": "req_25be7e064e0c590cd65709c85385c796", "span_id": None, "trace_id": "trace-id", "content": "You are a scientist.", "role": "system", "completion_id": None, "sequence": 0, - "response.model": "gpt-3.5-turbo-0613", + "response.model": "gpt-3.5-turbo-0125", "vendor": "openai", "ingest_source": "Python", }, @@ -95,17 +92,17 @@ ( {"type": "LlmChatCompletionMessage"}, { - "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug-1", + "id": "chatcmpl-9NPYxI4Zk5ztxNwW5osYdpevgoiBQ-1", "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", - "request_id": "f8d0f53b6881c5c0a3698e55f8f410ac", + "request_id": "req_25be7e064e0c590cd65709c85385c796", "span_id": None, "trace_id": "trace-id", "content": "What is 212 degrees Fahrenheit converted to Celsius?", "role": "user", "completion_id": None, "sequence": 1, - "response.model": "gpt-3.5-turbo-0613", + "response.model": "gpt-3.5-turbo-0125", "vendor": "openai", "ingest_source": "Python", }, @@ -113,17 +110,17 @@ ( {"type": "LlmChatCompletionMessage"}, { - "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug-2", + "id": "chatcmpl-9NPYxI4Zk5ztxNwW5osYdpevgoiBQ-2", "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", - "request_id": "f8d0f53b6881c5c0a3698e55f8f410ac", + "request_id": "req_25be7e064e0c590cd65709c85385c796", "span_id": None, "trace_id": "trace-id", - "content": "212 degrees Fahrenheit is equal to 100 degrees Celsius.", + "content": "212 degrees Fahrenheit is equivalent to 100 degrees Celsius. 
\n\nThe formula to convert Fahrenheit to Celsius is: \n\n\\[Celsius = (Fahrenheit - 32) \\times \\frac{5}{9}\\]\n\nSo, for 212 degrees Fahrenheit:\n\n\\[Celsius = (212 - 32) \\times \\frac{5}{9} = 100\\]", "role": "assistant", "completion_id": None, "sequence": 2, - "response.model": "gpt-3.5-turbo-0613", + "response.model": "gpt-3.5-turbo-0125", "vendor": "openai", "is_response": True, "ingest_source": "Python", @@ -156,6 +153,30 @@ def test_openai_chat_completion_sync_with_llm_metadata(set_trace_info, sync_open ) +@reset_core_stats_engine() +@validate_custom_events(chat_completion_recorded_events) +# One summary event, one system message, one user message, and one response message from the assistant +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + name="test_chat_completion_v1:test_openai_chat_completion_sync_with_llm_metadata_with_raw_response", + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_sync_with_llm_metadata_with_raw_response(set_trace_info, sync_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + sync_openai_client.chat.completions.with_raw_response.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + + @reset_core_stats_engine() @disabled_ai_monitoring_record_content_settings @validate_custom_events(events_sans_content(chat_completion_recorded_events)) @@ -329,6 +350,33 @@ def test_openai_chat_completion_async_with_llm_metadata(loop, set_trace_info, as ) +@reset_core_stats_engine() +@validate_custom_events(chat_completion_recorded_events) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_chat_completion_v1:test_openai_chat_completion_async_with_llm_metadata_with_raw_response", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_chat_completion_async_with_llm_metadata_with_raw_response(loop, set_trace_info, async_openai_client): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + loop.run_until_complete( + async_openai_client.chat.completions.with_raw_response.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + @reset_core_stats_engine() @disabled_ai_monitoring_record_content_settings @validate_custom_events(events_sans_content(chat_completion_recorded_events)) diff --git a/tests/mlmodel_openai/test_embeddings_error_v1.py b/tests/mlmodel_openai/test_embeddings_error_v1.py index 2464ebfe2..bb79986c4 100644 --- a/tests/mlmodel_openai/test_embeddings_error_v1.py +++ b/tests/mlmodel_openai/test_embeddings_error_v1.py @@ -41,7 +41,6 @@ from newrelic.api.background_task import background_task from newrelic.common.object_names import callable_name -# Sync tests: no_model_events = [ ( {"type": "LlmEmbedding"}, @@ -201,7 +200,7 @@ def test_embeddings_invalid_request_error_no_model_async(set_trace_info, async_o ) 
@validate_span_events( exact_agents={ - "error.message": "The model `does-not-exist` does not exist", + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", } ) @validate_transaction_metrics( @@ -237,7 +236,7 @@ def test_embeddings_invalid_request_error_invalid_model_with_token_count(set_tra ) @validate_span_events( exact_agents={ - "error.message": "The model `does-not-exist` does not exist", + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", } ) @validate_transaction_metrics( @@ -273,7 +272,7 @@ def test_embeddings_invalid_request_error_invalid_model(set_trace_info, sync_ope ) @validate_span_events( exact_agents={ - "error.message": "The model `does-not-exist` does not exist", + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", } ) @validate_transaction_metrics( @@ -312,7 +311,7 @@ def test_embeddings_invalid_request_error_invalid_model_async(set_trace_info, as ) @validate_span_events( exact_agents={ - "error.message": "The model `does-not-exist` does not exist", + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", } ) @validate_transaction_metrics( @@ -351,7 +350,7 @@ def test_embeddings_invalid_request_error_invalid_model_async_no_content(set_tra ) @validate_span_events( exact_agents={ - "error.message": "The model `does-not-exist` does not exist", + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", } ) @validate_transaction_metrics( @@ -469,3 +468,401 @@ def test_embeddings_wrong_api_key_error_async(set_trace_info, monkeypatch, async loop.run_until_complete( async_openai_client.embeddings.create(input="Invalid API key.", model="text-embedding-ada-002") ) + + +# .with_raw_response tests + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "create() missing 1 required keyword-only argument: 'model'" + if sys.version_info < (3, 10) + else "Embeddings.create() missing 1 required keyword-only argument: 'model'", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_no_model_with_raw_response", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(no_model_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_no_model_with_raw_response(set_trace_info, sync_openai_client): + with pytest.raises(TypeError): + set_trace_info() + sync_openai_client.embeddings.with_raw_response.create( + input="This is an embedding test with no model." 
+ ) # no model provided + + +@dt_enabled +@disabled_ai_monitoring_record_content_settings +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "create() missing 1 required keyword-only argument: 'model'" + if sys.version_info < (3, 10) + else "Embeddings.create() missing 1 required keyword-only argument: 'model'", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_no_model_no_content_with_raw_response", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(events_sans_content(no_model_events)) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_no_model_no_content_with_raw_response(set_trace_info, sync_openai_client): + with pytest.raises(TypeError): + set_trace_info() + sync_openai_client.embeddings.with_raw_response.create( + input="This is an embedding test with no model." + ) # no model provided + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(TypeError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": {}, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "create() missing 1 required keyword-only argument: 'model'" + if sys.version_info < (3, 10) + else "AsyncEmbeddings.create() missing 1 required keyword-only argument: 'model'", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_no_model_async_with_raw_response", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(no_model_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_no_model_async_with_raw_response(set_trace_info, async_openai_client, loop): + with pytest.raises(TypeError): + set_trace_info() + loop.run_until_complete( + async_openai_client.embeddings.with_raw_response.create(input="This is an embedding test with no model.") + ) # no model provided + + +@dt_enabled +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 404, + "error.code": "model_not_found", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_invalid_model_with_token_count_with_raw_response", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(add_token_count_to_events(invalid_model_events)) +@validate_custom_event_count(count=1) +@background_task() +def 
test_embeddings_invalid_request_error_invalid_model_with_token_count_with_raw_response( + set_trace_info, sync_openai_client +): + with pytest.raises(openai.NotFoundError): + set_trace_info() + sync_openai_client.embeddings.with_raw_response.create(input="Model does not exist.", model="does-not-exist") + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 404, + "error.code": "model_not_found", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_invalid_model_with_raw_response", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(invalid_model_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_invalid_model_with_raw_response(set_trace_info, sync_openai_client): + with pytest.raises(openai.NotFoundError): + set_trace_info() + sync_openai_client.embeddings.with_raw_response.create(input="Model does not exist.", model="does-not-exist") + + +# Async tests: +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 404, + "error.code": "model_not_found", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_invalid_model_async_with_raw_response", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(invalid_model_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_invalid_model_async_with_raw_response( + set_trace_info, async_openai_client, loop +): + with pytest.raises(openai.NotFoundError): + set_trace_info() + loop.run_until_complete( + async_openai_client.embeddings.with_raw_response.create( + input="Model does not exist.", model="does-not-exist" + ) + ) + + +@dt_enabled +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 404, + "error.code": "model_not_found", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_invalid_model_async_no_content_with_raw_response", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) 
+@validate_custom_events(events_sans_content(invalid_model_events)) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_invalid_model_async_no_content_with_raw_response( + set_trace_info, async_openai_client, loop +): + with pytest.raises(openai.NotFoundError): + set_trace_info() + loop.run_until_complete( + async_openai_client.embeddings.with_raw_response.create( + input="Model does not exist.", model="does-not-exist" + ) + ) + + +@dt_enabled +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +@validate_error_trace_attributes( + callable_name(openai.NotFoundError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 404, + "error.code": "model_not_found", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "The model `does-not-exist` does not exist or you do not have access to it.", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_invalid_request_error_invalid_model_async_with_token_count_with_raw_response", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(add_token_count_to_events(invalid_model_events)) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_invalid_request_error_invalid_model_async_with_token_count_with_raw_response( + set_trace_info, async_openai_client, loop +): + with pytest.raises(openai.NotFoundError): + set_trace_info() + + loop.run_until_complete( + async_openai_client.embeddings.with_raw_response.create( + input="Model does not exist.", model="does-not-exist" + ) + ) + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + "error.code": "invalid_api_key", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. You can find your API key at https://platform.openai.com/account/api-keys.", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_wrong_api_key_error_with_raw_response", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(embedding_invalid_key_error_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_wrong_api_key_error_with_raw_response(set_trace_info, monkeypatch, sync_openai_client): + with pytest.raises(openai.AuthenticationError): + set_trace_info() + monkeypatch.setattr(sync_openai_client, "api_key", "DEADBEEF") + sync_openai_client.embeddings.with_raw_response.create(input="Invalid API key.", model="text-embedding-ada-002") + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(openai.AuthenticationError), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 401, + "error.code": "invalid_api_key", + }, + }, +) +@validate_span_events( + exact_agents={ + "error.message": "Incorrect API key provided: DEADBEEF. 
You can find your API key at https://platform.openai.com/account/api-keys.", + } +) +@validate_transaction_metrics( + name="test_embeddings_error_v1:test_embeddings_wrong_api_key_error_async_with_raw_response", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_custom_events(embedding_invalid_key_error_events) +@validate_custom_event_count(count=1) +@background_task() +def test_embeddings_wrong_api_key_error_async_with_raw_response(set_trace_info, monkeypatch, async_openai_client, loop): + with pytest.raises(openai.AuthenticationError): + set_trace_info() + monkeypatch.setattr(async_openai_client, "api_key", "DEADBEEF") + loop.run_until_complete( + async_openai_client.embeddings.with_raw_response.create( + input="Invalid API key.", model="text-embedding-ada-002" + ) + ) diff --git a/tests/mlmodel_openai/test_embeddings_v1.py b/tests/mlmodel_openai/test_embeddings_v1.py index 7bafa5318..31540e75a 100644 --- a/tests/mlmodel_openai/test_embeddings_v1.py +++ b/tests/mlmodel_openai/test_embeddings_v1.py @@ -43,17 +43,17 @@ "trace_id": "trace-id", "input": "This is an embedding test.", "duration": None, # Response time varies each test run - "response.model": "text-embedding-ada-002-v2", + "response.model": "text-embedding-ada-002", "request.model": "text-embedding-ada-002", - "request_id": "fef7adee5adcfb03c083961bdce4f6a4", - "response.organization": "foobar-jtbczk", + "request_id": "req_eb2b9f2d23a671ad0d69545044437d68", + "response.organization": "new-relic-nkmd8b", "response.headers.llmVersion": "2020-10-01", - "response.headers.ratelimitLimitRequests": 200, - "response.headers.ratelimitLimitTokens": 150000, - "response.headers.ratelimitResetTokens": "2ms", - "response.headers.ratelimitResetRequests": "19m5.228s", - "response.headers.ratelimitRemainingTokens": 149993, - "response.headers.ratelimitRemainingRequests": 197, + "response.headers.ratelimitLimitRequests": 3000, + "response.headers.ratelimitLimitTokens": 1000000, + "response.headers.ratelimitResetTokens": "0s", + "response.headers.ratelimitResetRequests": "20ms", + "response.headers.ratelimitRemainingTokens": 999994, + "response.headers.ratelimitRemainingRequests": 2999, "vendor": "openai", "ingest_source": "Python", }, @@ -80,6 +80,27 @@ def test_openai_embedding_sync(set_trace_info, sync_openai_client): sync_openai_client.embeddings.create(input="This is an embedding test.", model="text-embedding-ada-002") +@reset_core_stats_engine() +@validate_custom_events(embedding_recorded_events) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_embeddings_v1:test_openai_embedding_sync_with_raw_response", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_embedding_sync_with_raw_response(set_trace_info, sync_openai_client): + set_trace_info() + sync_openai_client.embeddings.with_raw_response.create( + input="This is an embedding test.", model="text-embedding-ada-002" + ) + + @reset_core_stats_engine() @disabled_ai_monitoring_record_content_settings @validate_custom_events(events_sans_content(embedding_recorded_events)) @@ -156,6 +177,30 @@ def test_openai_embedding_async(loop, 
set_trace_info, async_openai_client): ) +@reset_core_stats_engine() +@validate_custom_events(embedding_recorded_events) +@validate_custom_event_count(count=1) +@validate_transaction_metrics( + name="test_embeddings_v1:test_openai_embedding_async_with_raw_response", + scoped_metrics=[("Llm/embedding/OpenAI/create", 1)], + rollup_metrics=[("Llm/embedding/OpenAI/create", 1)], + custom_metrics=[ + ("Supportability/Python/ML/OpenAI/%s" % openai.__version__, 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_openai_embedding_async_with_raw_response(loop, set_trace_info, async_openai_client): + set_trace_info() + + loop.run_until_complete( + async_openai_client.embeddings.with_raw_response.create( + input="This is an embedding test.", model="text-embedding-ada-002" + ) + ) + + @reset_core_stats_engine() @disabled_ai_monitoring_record_content_settings @validate_custom_events(events_sans_content(embedding_recorded_events))
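
For context on what the new tests above exercise: the `.with_raw_response` accessor in openai-python v1 wraps the normal resource method and returns a raw response wrapper (currently `openai._legacy_response.LegacyAPIResponse`) instead of the parsed pydantic model. The sketch below is illustrative only, not part of this diff; it uses the public openai-python v1 API and assumes a configured API key.

    # Illustrative sketch only -- not part of the diff above. Assumes openai>=1.x
    # and OPENAI_API_KEY set in the environment.
    import openai

    client = openai.OpenAI()

    # .with_raw_response returns a raw response wrapper rather than the
    # parsed CreateEmbeddingResponse model.
    raw = client.embeddings.with_raw_response.create(
        input="This is an embedding test.", model="text-embedding-ada-002"
    )

    # HTTP headers are available directly on the wrapper.
    print(raw.headers.get("x-request-id"))

    # .parse() recovers the usual pydantic response model.
    embedding_response = raw.parse()
    print(embedding_response.model, len(embedding_response.data[0].embedding))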