Fix AI test failures & do not run graphqlmaster tests #1211

Merged
merged 6 commits into from Sep 17, 2024
Changes from 3 commits
4 changes: 2 additions & 2 deletions tests/external_botocore/test_bedrock_chat_completion.py
@@ -696,7 +696,7 @@ def test_bedrock_chat_completion_error_malformed_response_streaming_chunk(
@validate_custom_events(chat_completion_expected_malformed_response_streaming_chunk_events)
@validate_custom_event_count(count=2)
@validate_error_trace_attributes(
-    "botocore.eventstream:InvalidHeadersLength",
+    "botocore.eventstream:ChecksumMismatch",
    exact_attrs={
        "agent": {},
        "intrinsic": {},
@@ -723,7 +723,7 @@ def test_bedrock_chat_completion_error_malformed_response_streaming_chunk(
    def _test():
        model = "amazon.titan-text-express-v1"
        body = (chat_completion_payload_templates[model] % ("Malformed Streaming Chunk", 0.7, 100)).encode("utf-8")
-        with pytest.raises(botocore.eventstream.InvalidHeadersLength):
+        with pytest.raises(botocore.eventstream.ChecksumMismatch):
            set_trace_info()
            add_custom_attribute("llm.conversation_id", "my-awesome-id")
            add_custom_attribute("llm.foo", "bar")
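
Note on this change: both InvalidHeadersLength and ChecksumMismatch are parser errors defined in botocore.eventstream, and recent botocore releases validate each event-stream message's prelude CRC, so the malformed-chunk fixture now surfaces as a checksum failure rather than a header-length failure. A minimal sketch of that behavior, assuming a recent botocore (the all-zero chunk below is illustrative, not the fixture the test actually sends):

```python
# Sketch, assuming a recent botocore: a malformed chunk fed to the
# event-stream parser raises ChecksumMismatch at the prelude CRC check.
import botocore.eventstream
import pytest


def test_malformed_chunk_raises_checksum_mismatch():
    buffer = botocore.eventstream.EventStreamBuffer()
    # An all-zero prelude declares a CRC of 0, but the CRC32 of the first
    # 8 bytes is nonzero, so parsing fails before any headers are read.
    buffer.add_data(b"\x00" * 16)
    with pytest.raises(botocore.eventstream.ChecksumMismatch):
        list(buffer)
```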
4 changes: 2 additions & 2 deletions tests/mlmodel_langchain/test_chain.py
@@ -523,7 +523,7 @@
            "vendor": "langchain",
            "ingest_source": "Python",
            "virtual_llm": True,
-            "content": "{'input': 'math', 'context': [Document(page_content='What is 2 + 4?')]}",
+            "content": "{'input': 'math', 'context': [Document(metadata={}, page_content='What is 2 + 4?')]}",
        },
    ],
    [
@@ -555,7 +555,7 @@
            "ingest_source": "Python",
            "is_response": True,
            "virtual_llm": True,
-            "content": "{'input': 'math', 'context': [Document(page_content='What is 2 + 4?')], 'answer': '```html\\n<!DOCTYPE html>\\n<html>\\n<head>\\n <title>Math Quiz</title>\\n</head>\\n<body>\\n <h2>Math Quiz Questions</h2>\\n <ol>\\n <li>What is the result of 5 + 3?</li>\\n <ul>\\n <li>A) 7</li>\\n <li>B) 8</li>\\n <li>C) 9</li>\\n <li>D) 10</li>\\n </ul>\\n <li>What is the product of 6 x 7?</li>\\n <ul>\\n <li>A) 36</li>\\n <li>B) 42</li>\\n <li>C) 48</li>\\n <li>D) 56</li>\\n </ul>\\n <li>What is the square root of 64?</li>\\n <ul>\\n <li>A) 6</li>\\n <li>B) 7</li>\\n <li>C) 8</li>\\n <li>D) 9</li>\\n </ul>\\n <li>What is the result of 12 / 4?</li>\\n <ul>\\n <li>A) 2</li>\\n <li>B) 3</li>\\n <li>C) 4</li>\\n <li>D) 5</li>\\n </ul>\\n <li>What is the sum of 15 + 9?</li>\\n <ul>\\n <li>A) 22</li>\\n <li>B) 23</li>\\n <li>C) 24</li>\\n <li>D) 25</li>\\n </ul>\\n </ol>\\n</body>\\n</html>\\n```'}",
+            "content": "{'input': 'math', 'context': [Document(metadata={}, page_content='What is 2 + 4?')], 'answer': '```html\\n<!DOCTYPE html>\\n<html>\\n<head>\\n <title>Math Quiz</title>\\n</head>\\n<body>\\n <h2>Math Quiz Questions</h2>\\n <ol>\\n <li>What is the result of 5 + 3?</li>\\n <ul>\\n <li>A) 7</li>\\n <li>B) 8</li>\\n <li>C) 9</li>\\n <li>D) 10</li>\\n </ul>\\n <li>What is the product of 6 x 7?</li>\\n <ul>\\n <li>A) 36</li>\\n <li>B) 42</li>\\n <li>C) 48</li>\\n <li>D) 56</li>\\n </ul>\\n <li>What is the square root of 64?</li>\\n <ul>\\n <li>A) 6</li>\\n <li>B) 7</li>\\n <li>C) 8</li>\\n <li>D) 9</li>\\n </ul>\\n <li>What is the result of 12 / 4?</li>\\n <ul>\\n <li>A) 2</li>\\n <li>B) 3</li>\\n <li>C) 4</li>\\n <li>D) 5</li>\\n </ul>\\n <li>What is the sum of 15 + 9?</li>\\n <ul>\\n <li>A) 22</li>\\n <li>B) 23</li>\\n <li>C) 24</li>\\n <li>D) 25</li>\\n </ul>\\n </ol>\\n</body>\\n</html>\\n```'}",
        },
    ],
]
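
Note on this change: these expected-event strings embed the repr() of langchain's Document, and newer langchain-core releases include the metadata field in that repr. A quick check of which form an installed release produces (a sketch, assuming langchain-core is importable):

```python
# Sketch, assuming langchain-core is installed: the expected "content"
# strings must match whatever repr() the installed release produces.
from langchain_core.documents import Document

doc = Document(page_content="What is 2 + 4?")
print(repr(doc))
# Older releases: Document(page_content='What is 2 + 4?')
# Newer releases: Document(metadata={}, page_content='What is 2 + 4?')
```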
18 changes: 9 additions & 9 deletions tests/mlmodel_langchain/test_tool.py
@@ -17,7 +17,7 @@
import uuid

import langchain
-import pydantic
+import pydantic_core
import pytest
from langchain.tools import tool
from mock import patch
@@ -269,7 +269,7 @@ def test_langchain_multi_arg_tool_async(set_trace_info, multi_arg_tool, loop):
@reset_core_stats_engine()
@validate_transaction_error_event_count(1)
@validate_error_trace_attributes(
-    callable_name(pydantic.v1.error_wrappers.ValidationError),
+    callable_name(pydantic_core._pydantic_core.ValidationError),
    exact_attrs={
        "agent": {},
        "intrinsic": {},
@@ -289,7 +289,7 @@ def test_langchain_multi_arg_tool_async(set_trace_info, multi_arg_tool, loop):
)
@background_task()
def test_langchain_error_in_run(set_trace_info, multi_arg_tool):
-    with pytest.raises(pydantic.v1.error_wrappers.ValidationError):
+    with pytest.raises(pydantic_core._pydantic_core.ValidationError):
        set_trace_info()
        # Only one argument is provided while the tool expects two to create an error
        multi_arg_tool.run(
@@ -301,7 +301,7 @@ def test_langchain_error_in_run(set_trace_info, multi_arg_tool):
@disabled_ai_monitoring_record_content_settings
@validate_transaction_error_event_count(1)
@validate_error_trace_attributes(
-    callable_name(pydantic.v1.error_wrappers.ValidationError),
+    callable_name(pydantic_core._pydantic_core.ValidationError),
    exact_attrs={
        "agent": {},
        "intrinsic": {},
@@ -321,7 +321,7 @@ def test_langchain_error_in_run(set_trace_info, multi_arg_tool):
)
@background_task()
def test_langchain_error_in_run_no_content(set_trace_info, multi_arg_tool):
-    with pytest.raises(pydantic.v1.error_wrappers.ValidationError):
+    with pytest.raises(pydantic_core._pydantic_core.ValidationError):
        set_trace_info()
        # Only one argument is provided while the tool expects two to create an error
        multi_arg_tool.run(
@@ -332,7 +332,7 @@ def test_langchain_error_in_run_no_content(set_trace_info, multi_arg_tool):
@reset_core_stats_engine()
@validate_transaction_error_event_count(1)
@validate_error_trace_attributes(
-    callable_name(pydantic.v1.error_wrappers.ValidationError),
+    callable_name(pydantic_core._pydantic_core.ValidationError),
    exact_attrs={
        "agent": {},
        "intrinsic": {},
@@ -352,7 +352,7 @@ def test_langchain_error_in_run_no_content(set_trace_info, multi_arg_tool):
)
@background_task()
def test_langchain_error_in_run_async(set_trace_info, multi_arg_tool, loop):
-    with pytest.raises(pydantic.v1.error_wrappers.ValidationError):
+    with pytest.raises(pydantic_core._pydantic_core.ValidationError):
        set_trace_info()
        # Only one argument is provided while the tool expects two to create an error
        loop.run_until_complete(
@@ -366,7 +366,7 @@ def test_langchain_error_in_run_async(set_trace_info, multi_arg_tool, loop):
@disabled_ai_monitoring_record_content_settings
@validate_transaction_error_event_count(1)
@validate_error_trace_attributes(
-    callable_name(pydantic.v1.error_wrappers.ValidationError),
+    callable_name(pydantic_core._pydantic_core.ValidationError),
    exact_attrs={
        "agent": {},
        "intrinsic": {},
@@ -386,7 +386,7 @@ def test_langchain_error_in_run_async(set_trace_info, multi_arg_tool, loop):
)
@background_task()
def test_langchain_error_in_run_async_no_content(set_trace_info, multi_arg_tool, loop):
-    with pytest.raises(pydantic.v1.error_wrappers.ValidationError):
+    with pytest.raises(pydantic_core._pydantic_core.ValidationError):
        set_trace_info()
        # Only one argument is provided while the tool expects two to create an error
        loop.run_until_complete(
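
Note on this change: langchain tools now validate their arguments against pydantic v2 models, and v2's ValidationError is implemented in the compiled pydantic_core extension, so both the callable_name(...) assertions and the pytest.raises(...) checks must reference pydantic_core._pydantic_core.ValidationError instead of the v1 compatibility path. A sketch of why that exact module path appears, assuming pydantic v2 is installed (the Args model and its field names are illustrative, not the multi_arg_tool fixture's real schema):

```python
# Sketch, assuming pydantic v2; the Args model is illustrative, not the
# multi_arg_tool fixture's actual argument schema.
import pydantic
import pydantic_core


class Args(pydantic.BaseModel):
    first_num: int
    second_num: int


try:
    Args(first_num=53)  # second argument omitted, mirroring the failing runs
except pydantic_core.ValidationError as exc:
    # callable_name() reports errors as "module:qualname", which is what
    # validate_error_trace_attributes compares against.
    print(f"{type(exc).__module__}:{type(exc).__qualname__}")
    # -> pydantic_core._pydantic_core:ValidationError
```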