diff --git a/tests/external_botocore/test_bedrock_chat_completion.py b/tests/external_botocore/test_bedrock_chat_completion.py
index 7cab44634..0c24fea0c 100644
--- a/tests/external_botocore/test_bedrock_chat_completion.py
+++ b/tests/external_botocore/test_bedrock_chat_completion.py
@@ -696,7 +696,7 @@ def test_bedrock_chat_completion_error_malformed_response_streaming_chunk(
 @validate_custom_events(chat_completion_expected_malformed_response_streaming_chunk_events)
 @validate_custom_event_count(count=2)
 @validate_error_trace_attributes(
-    "botocore.eventstream:InvalidHeadersLength",
+    "botocore.eventstream:ChecksumMismatch",
     exact_attrs={
         "agent": {},
         "intrinsic": {},
@@ -723,7 +723,7 @@ def test_bedrock_chat_completion_error_malformed_response_streaming_chunk(
     def _test():
         model = "amazon.titan-text-express-v1"
         body = (chat_completion_payload_templates[model] % ("Malformed Streaming Chunk", 0.7, 100)).encode("utf-8")
-        with pytest.raises(botocore.eventstream.InvalidHeadersLength):
+        with pytest.raises(botocore.eventstream.ChecksumMismatch):
             set_trace_info()
             add_custom_attribute("llm.conversation_id", "my-awesome-id")
             add_custom_attribute("llm.foo", "bar")
diff --git a/tests/mlmodel_langchain/test_chain.py b/tests/mlmodel_langchain/test_chain.py
index 0df762b4a..f3c2ab085 100644
--- a/tests/mlmodel_langchain/test_chain.py
+++ b/tests/mlmodel_langchain/test_chain.py
@@ -523,7 +523,7 @@
             "vendor": "langchain",
             "ingest_source": "Python",
             "virtual_llm": True,
-            "content": "{'input': 'math', 'context': [Document(page_content='What is 2 + 4?')]}",
+            "content": "{'input': 'math', 'context': [Document(metadata={}, page_content='What is 2 + 4?')]}",
         },
     ],
     [
@@ -555,7 +555,7 @@
             "ingest_source": "Python",
             "is_response": True,
             "virtual_llm": True,
-            "content": "{'input': 'math', 'context': [Document(page_content='What is 2 + 4?')], 'answer': '```html\\n<!DOCTYPE html>\\n<html>\\n<head>\\n    <title>Math Quiz</title>\\n</head>\\n<body>\\n    <h1>Math Quiz Questions</h1>\\n    <ol>\\n        <li>What is the result of 5 + 3?</li>\\n        <li>\\n            <input type=\"text\">\\n        </li>\\n        <li>What is the product of 6 x 7?</li>\\n        <li>\\n            <input type=\"text\">\\n        </li>\\n        <li>What is the square root of 64?</li>\\n        <li>\\n            <input type=\"text\">\\n        </li>\\n        <li>What is the result of 12 / 4?</li>\\n        <li>\\n            <input type=\"text\">\\n        </li>\\n        <li>What is the sum of 15 + 9?</li>\\n        <li>\\n            <input type=\"text\">\\n        </li>\\n    </ol>\\n</body>\\n</html>\\n```'}",
+            "content": "{'input': 'math', 'context': [Document(metadata={}, page_content='What is 2 + 4?')], 'answer': '```html\\n<!DOCTYPE html>\\n<html>\\n<head>\\n    <title>Math Quiz</title>\\n</head>\\n<body>\\n    <h1>Math Quiz Questions</h1>\\n    <ol>\\n        <li>What is the result of 5 + 3?</li>\\n        <li>\\n            <input type=\"text\">\\n        </li>\\n        <li>What is the product of 6 x 7?</li>\\n        <li>\\n            <input type=\"text\">\\n        </li>\\n        <li>What is the square root of 64?</li>\\n        <li>\\n            <input type=\"text\">\\n        </li>\\n        <li>What is the result of 12 / 4?</li>\\n        <li>\\n            <input type=\"text\">\\n        </li>\\n        <li>What is the sum of 15 + 9?</li>\\n        <li>\\n            <input type=\"text\">\\n        </li>\\n    </ol>\\n</body>\\n</html>\\n```'}",
         },
     ],
 ]
diff --git a/tests/mlmodel_langchain/test_tool.py b/tests/mlmodel_langchain/test_tool.py
index 86a671662..1d41b41e8 100644
--- a/tests/mlmodel_langchain/test_tool.py
+++ b/tests/mlmodel_langchain/test_tool.py
@@ -17,7 +17,7 @@
 import uuid
 
 import langchain
-import pydantic
+import pydantic_core
 import pytest
 from langchain.tools import tool
 from mock import patch
@@ -269,7 +269,7 @@ def test_langchain_multi_arg_tool_async(set_trace_info, multi_arg_tool, loop):
 @reset_core_stats_engine()
 @validate_transaction_error_event_count(1)
 @validate_error_trace_attributes(
-    callable_name(pydantic.v1.error_wrappers.ValidationError),
+    callable_name(pydantic_core._pydantic_core.ValidationError),
     exact_attrs={
         "agent": {},
         "intrinsic": {},
@@ -289,7 +289,7 @@ def test_langchain_multi_arg_tool_async(set_trace_info, multi_arg_tool, loop):
 )
 @background_task()
 def test_langchain_error_in_run(set_trace_info, multi_arg_tool):
-    with pytest.raises(pydantic.v1.error_wrappers.ValidationError):
+    with pytest.raises(pydantic_core._pydantic_core.ValidationError):
         set_trace_info()
         # Only one argument is provided while the tool expects two to create an error
         multi_arg_tool.run(
@@ -301,7 +301,7 @@ def test_langchain_error_in_run(set_trace_info, multi_arg_tool):
 @disabled_ai_monitoring_record_content_settings
 @validate_transaction_error_event_count(1)
 @validate_error_trace_attributes(
-    callable_name(pydantic.v1.error_wrappers.ValidationError),
+    callable_name(pydantic_core._pydantic_core.ValidationError),
     exact_attrs={
         "agent": {},
         "intrinsic": {},
@@ -321,7 +321,7 @@ def test_langchain_error_in_run(set_trace_info, multi_arg_tool):
 )
 @background_task()
 def test_langchain_error_in_run_no_content(set_trace_info, multi_arg_tool):
-    with pytest.raises(pydantic.v1.error_wrappers.ValidationError):
+    with pytest.raises(pydantic_core._pydantic_core.ValidationError):
         set_trace_info()
         # Only one argument is provided while the tool expects two to create an error
         multi_arg_tool.run(
@@ -332,7 +332,7 @@ def test_langchain_error_in_run_no_content(set_trace_info, multi_arg_tool):
 @reset_core_stats_engine()
 @validate_transaction_error_event_count(1)
 @validate_error_trace_attributes(
-    callable_name(pydantic.v1.error_wrappers.ValidationError),
+    callable_name(pydantic_core._pydantic_core.ValidationError),
     exact_attrs={
         "agent": {},
         "intrinsic": {},
@@ -352,7 +352,7 @@ def test_langchain_error_in_run_no_content(set_trace_info, multi_arg_tool):
 )
 @background_task()
 def test_langchain_error_in_run_async(set_trace_info, multi_arg_tool, loop):
-    with pytest.raises(pydantic.v1.error_wrappers.ValidationError):
+    with pytest.raises(pydantic_core._pydantic_core.ValidationError):
         set_trace_info()
         # Only one argument is provided while the tool expects two to create an error
         loop.run_until_complete(
@@ -366,7 +366,7 @@ def test_langchain_error_in_run_async(set_trace_info, multi_arg_tool, loop):
 @disabled_ai_monitoring_record_content_settings
 @validate_transaction_error_event_count(1)
 @validate_error_trace_attributes(
-    callable_name(pydantic.v1.error_wrappers.ValidationError),
+    callable_name(pydantic_core._pydantic_core.ValidationError),
     exact_attrs={
         "agent": {},
         "intrinsic": {},
@@ -386,7 +386,7 @@ def test_langchain_error_in_run_async(set_trace_info, multi_arg_tool, loop):
 )
 @background_task()
 def test_langchain_error_in_run_async_no_content(set_trace_info, multi_arg_tool, loop):
-    with pytest.raises(pydantic.v1.error_wrappers.ValidationError):
+    with pytest.raises(pydantic_core._pydantic_core.ValidationError):
         set_trace_info()
         # Only one argument is provided while the tool expects two to create an error
         loop.run_until_complete(
diff --git a/tox.ini b/tox.ini
index 84361d01b..34c0fc5b7 100644
--- a/tox.ini
+++ b/tox.ini
@@ -134,7 +134,8 @@ envlist =
     python-framework_flask-{py38,py39,py310,py311,py312,pypy310}-flask{020205,latest,master},
     python-framework_graphene-{py37,py38,py39,py310,py311,py312}-graphenelatest,
     python-framework_graphql-{py37,py38,py39,py310,py311,py312,pypy310}-graphql03,
-    python-framework_graphql-{py37,py38,py39,py310,py311,py312,pypy310}-graphql{latest,master},
+    ; Remove graphqlmaster tests.
+    python-framework_graphql-{py37,py38,py39,py310,py311,py312,pypy310}-graphql{latest},
     python-framework_graphql-py37-graphql{0301,0302},
     python-framework_pyramid-{py37,py38,py39,py310,py311,py312,pypy310}-Pyramidlatest,
     python-framework_pyramid-{py37,py38,py39,py310,py311,py312,pypy310}-Pyramid0110-cornice,
@@ -150,7 +151,7 @@ envlist =
     python-logger_loguru-{py37,py38,py39,py310,py311,py312,pypy310}-logurulatest,
     python-logger_loguru-py39-loguru{06,05},
     python-logger_structlog-{py37,py38,py39,py310,py311,py312,pypy310}-structloglatest,
-    python-mlmodel_langchain-{py38,py39,py310,py311,py312},
+    python-mlmodel_langchain-{py39,py310,py311,py312},
     python-mlmodel_openai-openai0-{py37,py38,py39,py310,py311,py312},
     python-mlmodel_openai-openai107-py312,
     python-mlmodel_openai-openailatest-{py37,py38,py39,py310,py311,py312},