From 096b24c80de64501e6f10d6f6b1eff9ee186789b Mon Sep 17 00:00:00 2001 From: Tom Ron Date: Thu, 5 Dec 2024 18:27:04 +0000 Subject: [PATCH 01/13] Initial commit of Amazon Q Business runnable for langchain --- libs/aws/langchain_aws/llms/q_business.py | 147 ++++++++++++++++++++++ 1 file changed, 147 insertions(+) create mode 100644 libs/aws/langchain_aws/llms/q_business.py diff --git a/libs/aws/langchain_aws/llms/q_business.py b/libs/aws/langchain_aws/llms/q_business.py new file mode 100644 index 00000000..9b809a08 --- /dev/null +++ b/libs/aws/langchain_aws/llms/q_business.py @@ -0,0 +1,147 @@ +from typing import Any, AsyncGenerator, Dict, Iterator, List, Optional +from langchain_core.callbacks.manager import ( + AsyncCallbackManagerForLLMRun, + CallbackManagerForLLMRun +) +from langchain_core.outputs import GenerationChunk +from langchain_core.language_models import LLM +from pydantic import ConfigDict +import json +import asyncio +import boto3 +from tvm_client import TVMClient + +class AmazonQ(LLM): + """Amazon Q LLM wrapper. + + To authenticate, the AWS client uses the following methods to + automatically load credentials: + https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html + + Make sure the credentials / roles used have the required policies to + access the Amazon Q service. + """ + + region_name: Optional[str] = None + """AWS region name. If not provided, will be extracted from environment.""" + + streaming: bool = False + """Whether to stream the results or not.""" + + client: Any = None + """Amazon Q client.""" + + application_id: str = None + """Amazon Q client.""" + + _last_response: Dict = None # Add this to store the full response + """Store the full response from Amazon Q.""" + + parent_message_id: Optional[str] = None + """AWS region name. If not provided, will be extracted from environment.""" + + conversation_id: Optional[str] = None + """AWS region name. If not provided, will be extracted from environment.""" + + chat_mode: str = "RETRIEVAL_MODE" + """AWS region name. If not provided, will be extracted from environment.""" + + model_config = ConfigDict( + extra="forbid", + ) + + @property + def _llm_type(self) -> str: + """Return type of llm.""" + return "amazon_q" + + def __init__(self, **kwargs: Any) -> None: + """Initialize the Amazon Q client.""" + super().__init__(**kwargs) + + def _call( + self, + prompt: str, + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> str: + """Call out to Amazon Q service. + + Args: + prompt: The prompt to pass into the model. + stop: Optional list of stop words to use when generating. + + Returns: + The string generated by the model. + + Example: + .. 
code-block:: python + + response = llm.invoke("Tell me a joke.") + """ + try: + print("Prompt Length (Amazon Q ChatSync API takes a maximum of 7000 chars)") + print(len(prompt)) + + # Prepare the request + request = { + 'applicationId': "130f4ea4-855f-4ddf-b2a5-1e40923692d4", + 'userMessage': prompt, + 'chatMode':self.chat_mode, + } + if not self.conversation_id: + request = { + 'applicationId': self.application_id, + 'userMessage': prompt, + 'chatMode':self.chat_mode, + } + else: + request = { + 'applicationId': self.application_id, + 'userMessage': prompt, + 'chatMode':self.chat_mode, + 'conversationId':self.conversation_id, + 'parentMessageId':self.parent_message_id, + } + + # Call Amazon Q + response = self.client.chat_sync(**request) + self._last_response = response + + # Extract the response text + if 'systemMessage' in response: + return response["systemMessage"] + else: + raise ValueError("Unexpected response format from Amazon Q") + + except Exception as e: + raise ValueError(f"Error raised by Amazon Q service: {e}") + + def get_last_response(self) -> Dict: + """Method to access the full response from the last call""" + return self._last_response + + async def _acall( + self, + prompt: str, + stop: Optional[List[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> str: + """Async call to Amazon Q service.""" + + def _execute_call(): + return self._call(prompt, stop=stop, **kwargs) + + # Run the synchronous call in a thread pool + return await asyncio.get_running_loop().run_in_executor( + None, _execute_call + ) + + @property + def _identifying_params(self) -> Dict[str, Any]: + """Get the identifying parameters.""" + return { + "region_name": self.region_name, + } \ No newline at end of file From c9785c12b399469aca64e9ebd8421c2c75f05f9f Mon Sep 17 00:00:00 2001 From: Tom Ron Date: Thu, 5 Dec 2024 18:37:39 +0000 Subject: [PATCH 02/13] removed unused import --- libs/aws/langchain_aws/llms/q_business.py | 1 - 1 file changed, 1 deletion(-) diff --git a/libs/aws/langchain_aws/llms/q_business.py b/libs/aws/langchain_aws/llms/q_business.py index 9b809a08..ac30c348 100644 --- a/libs/aws/langchain_aws/llms/q_business.py +++ b/libs/aws/langchain_aws/llms/q_business.py @@ -9,7 +9,6 @@ import json import asyncio import boto3 -from tvm_client import TVMClient class AmazonQ(LLM): """Amazon Q LLM wrapper. From 40c8306cc5412fa83bef23d263f43b4d3de5c453 Mon Sep 17 00:00:00 2001 From: Tom Ron Date: Tue, 17 Dec 2024 20:18:23 +0000 Subject: [PATCH 03/13] Made changes in response to comments on https://github.com/langchain-ai/langchain-aws/pull/30 --- libs/aws/langchain_aws/llms/q_business.py | 77 +++++++++++++++++------ 1 file changed, 58 insertions(+), 19 deletions(-) diff --git a/libs/aws/langchain_aws/llms/q_business.py b/libs/aws/langchain_aws/llms/q_business.py index ac30c348..07dd556c 100644 --- a/libs/aws/langchain_aws/llms/q_business.py +++ b/libs/aws/langchain_aws/llms/q_business.py @@ -1,14 +1,15 @@ -from typing import Any, AsyncGenerator, Dict, Iterator, List, Optional +from typing import Any, Dict, List, Optional from langchain_core.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun ) -from langchain_core.outputs import GenerationChunk +import logging from langchain_core.language_models import LLM -from pydantic import ConfigDict +from pydantic import ConfigDict, model_validator import json import asyncio import boto3 +from typing_extensions import Self class AmazonQ(LLM): """Amazon Q LLM wrapper. 
@@ -45,6 +46,13 @@ class AmazonQ(LLM): chat_mode: str = "RETRIEVAL_MODE" """AWS region name. If not provided, will be extracted from environment.""" + credentials_profile_name: Optional[str] = None + """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which + has either access keys or role information specified. + If not specified, the default credential profile or, if on an EC2 instance, + credentials from IMDS will be used. + See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html + """ model_config = ConfigDict( extra="forbid", ) @@ -85,24 +93,15 @@ def _call( # Prepare the request request = { - 'applicationId': "130f4ea4-855f-4ddf-b2a5-1e40923692d4", + 'applicationId': self.application_id, 'userMessage': prompt, 'chatMode':self.chat_mode, } - if not self.conversation_id: - request = { - 'applicationId': self.application_id, - 'userMessage': prompt, - 'chatMode':self.chat_mode, - } - else: - request = { - 'applicationId': self.application_id, - 'userMessage': prompt, - 'chatMode':self.chat_mode, - 'conversationId':self.conversation_id, - 'parentMessageId':self.parent_message_id, - } + if self.conversation_id: + request.update({ + 'conversationId': self.conversation_id, + 'parentMessageId': self.parent_message_id, + }) # Call Amazon Q response = self.client.chat_sync(**request) @@ -115,6 +114,12 @@ def _call( raise ValueError("Unexpected response format from Amazon Q") except Exception as e: + if "Prompt Length" in str(e): + logging.info(f"Prompt Length: {len(prompt)}") + print(f"""Prompt: + {prompt}""") + raise ValueError(f"Error raised by Amazon Q service: {e}") + raise ValueError(f"Error raised by Amazon Q service: {e}") def get_last_response(self) -> Dict: @@ -143,4 +148,38 @@ def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return { "region_name": self.region_name, - } \ No newline at end of file + } + @model_validator(mode="after") + def validate_environment(self) -> Self: + """Dont do anything if client provided externally""" + if self.client is not None: + return self + + """Validate that AWS credentials to and python package exists in environment.""" + try: + import boto3 + + try: + if self.credentials_profile_name is not None: + session = boto3.Session(profile_name=self.credentials_profile_name) + else: + # use default credentials + session = boto3.Session() + + self.client = session.client( + "qbusiness", region_name=self.region_name + ) + + except Exception as e: + raise ValueError( + "Could not load credentials to authenticate with AWS client. " + "Please check that credentials in the specified " + "profile name are valid." + ) from e + + except ImportError: + raise ImportError( + "Could not import boto3 python package. " + "Please install it with `pip install boto3`." 
+ ) + return self \ No newline at end of file From 38665d070d727254b37335551962d52ac74f852a Mon Sep 17 00:00:00 2001 From: Tom Ron Date: Mon, 23 Dec 2024 22:02:25 +0000 Subject: [PATCH 04/13] Re-implementing q_business as a runnable --- libs/aws/langchain_aws/runnables/__init__.py | 3 ++ .../{llms => runnables}/q_business.py | 32 +++++++------------ 2 files changed, 15 insertions(+), 20 deletions(-) create mode 100644 libs/aws/langchain_aws/runnables/__init__.py rename libs/aws/langchain_aws/{llms => runnables}/q_business.py (87%) diff --git a/libs/aws/langchain_aws/runnables/__init__.py b/libs/aws/langchain_aws/runnables/__init__.py new file mode 100644 index 00000000..dbc787e2 --- /dev/null +++ b/libs/aws/langchain_aws/runnables/__init__.py @@ -0,0 +1,3 @@ +from langchain_aws.runnables.q_business import AmazonQ + +__all__ = ["AmazonQ"] \ No newline at end of file diff --git a/libs/aws/langchain_aws/llms/q_business.py b/libs/aws/langchain_aws/runnables/q_business.py similarity index 87% rename from libs/aws/langchain_aws/llms/q_business.py rename to libs/aws/langchain_aws/runnables/q_business.py index 07dd556c..5ccaf88d 100644 --- a/libs/aws/langchain_aws/llms/q_business.py +++ b/libs/aws/langchain_aws/runnables/q_business.py @@ -24,6 +24,9 @@ class AmazonQ(LLM): region_name: Optional[str] = None """AWS region name. If not provided, will be extracted from environment.""" + + user_id: Optional[str] = None + """Amazon Q user will be used for credentials if they are not provided through the client.""" streaming: bool = False """Whether to stream the results or not.""" @@ -61,7 +64,6 @@ class AmazonQ(LLM): def _llm_type(self) -> str: """Return type of llm.""" return "amazon_q" - def __init__(self, **kwargs: Any) -> None: """Initialize the Amazon Q client.""" super().__init__(**kwargs) @@ -88,9 +90,6 @@ def _call( response = llm.invoke("Tell me a joke.") """ try: - print("Prompt Length (Amazon Q ChatSync API takes a maximum of 7000 chars)") - print(len(prompt)) - # Prepare the request request = { 'applicationId': self.application_id, @@ -118,8 +117,6 @@ def _call( logging.info(f"Prompt Length: {len(prompt)}") print(f"""Prompt: {prompt}""") - raise ValueError(f"Error raised by Amazon Q service: {e}") - raise ValueError(f"Error raised by Amazon Q service: {e}") def get_last_response(self) -> Dict: @@ -143,32 +140,27 @@ def _execute_call(): None, _execute_call ) - @property - def _identifying_params(self) -> Dict[str, Any]: - """Get the identifying parameters.""" - return { - "region_name": self.region_name, - } @model_validator(mode="after") def validate_environment(self) -> Self: """Dont do anything if client provided externally""" if self.client is not None: return self + #If the client is not provided, and the user_id is not provided in the class constructor, throw an error saying one or the other needs to be provided + if self.user_id is None: + raise ValueError( + "Either the user_id or the client needs to be provided." 
+ ) """Validate that AWS credentials to and python package exists in environment.""" try: import boto3 try: - if self.credentials_profile_name is not None: - session = boto3.Session(profile_name=self.credentials_profile_name) + if self.region_name is not None: + self.client = boto3.client('qbusiness', self.region_name) else: - # use default credentials - session = boto3.Session() - - self.client = session.client( - "qbusiness", region_name=self.region_name - ) + # use default region + self.client = boto3.client('qbusiness') except Exception as e: raise ValueError( From e52d3ee488cd45603fa31724dbe68a3e3a479306 Mon Sep 17 00:00:00 2001 From: Tom Ron Date: Mon, 23 Dec 2024 22:03:59 +0000 Subject: [PATCH 05/13] Removed unnecessary constructor params. Re-factored validator to create client using credentials sine anonymous invocation of chatsync with userId is no longer suppported --- .../aws/langchain_aws/runnables/q_business.py | 22 +++++-------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/libs/aws/langchain_aws/runnables/q_business.py b/libs/aws/langchain_aws/runnables/q_business.py index 5ccaf88d..79e735f6 100644 --- a/libs/aws/langchain_aws/runnables/q_business.py +++ b/libs/aws/langchain_aws/runnables/q_business.py @@ -25,11 +25,8 @@ class AmazonQ(LLM): region_name: Optional[str] = None """AWS region name. If not provided, will be extracted from environment.""" - user_id: Optional[str] = None - """Amazon Q user will be used for credentials if they are not provided through the client.""" - - streaming: bool = False - """Whether to stream the results or not.""" + credentials: Optional[Any] = None + """Amazon Q credentials used to instantiate the client if the client is not provided.""" client: Any = None """Amazon Q client.""" @@ -49,13 +46,6 @@ class AmazonQ(LLM): chat_mode: str = "RETRIEVAL_MODE" """AWS region name. If not provided, will be extracted from environment.""" - credentials_profile_name: Optional[str] = None - """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which - has either access keys or role information specified. - If not specified, the default credential profile or, if on an EC2 instance, - credentials from IMDS will be used. - See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - """ model_config = ConfigDict( extra="forbid", ) @@ -146,9 +136,9 @@ def validate_environment(self) -> Self: if self.client is not None: return self #If the client is not provided, and the user_id is not provided in the class constructor, throw an error saying one or the other needs to be provided - if self.user_id is None: + if self.credentials is None: raise ValueError( - "Either the user_id or the client needs to be provided." + "Either the credentials or the client needs to be provided." 
) """Validate that AWS credentials to and python package exists in environment.""" @@ -157,10 +147,10 @@ def validate_environment(self) -> Self: try: if self.region_name is not None: - self.client = boto3.client('qbusiness', self.region_name) + self.client = boto3.client('qbusiness', self.region_name, **self.credentials) else: # use default region - self.client = boto3.client('qbusiness') + self.client = boto3.client('qbusiness', **self.credentials) except Exception as e: raise ValueError( From b7378ea7b64ef770ef8b2b0ee4e0fac7466cc910 Mon Sep 17 00:00:00 2001 From: Tom Ron Date: Wed, 22 Jan 2025 14:22:49 +0000 Subject: [PATCH 06/13] Addressing comments in: https://github.com/langchain-ai/langchain-aws/pull/301#discussion_r1902214284 --- libs/aws/langchain_aws/runnables/q_business.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/libs/aws/langchain_aws/runnables/q_business.py b/libs/aws/langchain_aws/runnables/q_business.py index 79e735f6..e28b95fd 100644 --- a/libs/aws/langchain_aws/runnables/q_business.py +++ b/libs/aws/langchain_aws/runnables/q_business.py @@ -50,15 +50,11 @@ class AmazonQ(LLM): extra="forbid", ) - @property - def _llm_type(self) -> str: - """Return type of llm.""" - return "amazon_q" def __init__(self, **kwargs: Any) -> None: """Initialize the Amazon Q client.""" super().__init__(**kwargs) - def _call( + def invoke( self, prompt: str, stop: Optional[List[str]] = None, @@ -113,7 +109,7 @@ def get_last_response(self) -> Dict: """Method to access the full response from the last call""" return self._last_response - async def _acall( + async def ainvoke( self, prompt: str, stop: Optional[List[str]] = None, @@ -123,7 +119,7 @@ async def _acall( """Async call to Amazon Q service.""" def _execute_call(): - return self._call(prompt, stop=stop, **kwargs) + return self.invoke(prompt, stop=stop, **kwargs) # Run the synchronous call in a thread pool return await asyncio.get_running_loop().run_in_executor( From 79c32b58d6b4ab7793da63e73dba1046d19fc9f7 Mon Sep 17 00:00:00 2001 From: Michael Chin Date: Wed, 22 Jan 2025 21:57:28 -0800 Subject: [PATCH 07/13] Rewrite AmazonQ from LLM -> Runnable --- .../aws/langchain_aws/runnables/q_business.py | 94 ++++++++++--------- 1 file changed, 48 insertions(+), 46 deletions(-) diff --git a/libs/aws/langchain_aws/runnables/q_business.py b/libs/aws/langchain_aws/runnables/q_business.py index e28b95fd..947c46b3 100644 --- a/libs/aws/langchain_aws/runnables/q_business.py +++ b/libs/aws/langchain_aws/runnables/q_business.py @@ -1,18 +1,13 @@ -from typing import Any, Dict, List, Optional -from langchain_core.callbacks.manager import ( - AsyncCallbackManagerForLLMRun, - CallbackManagerForLLMRun -) +from typing import Any, Dict, Optional import logging -from langchain_core.language_models import LLM -from pydantic import ConfigDict, model_validator -import json +from langchain_core.runnables import Runnable +from pydantic import ConfigDict import asyncio -import boto3 from typing_extensions import Self -class AmazonQ(LLM): - """Amazon Q LLM wrapper. + +class AmazonQ(Runnable): + """Amazon Q Runnable wrapper. To authenticate, the AWS client uses the following methods to automatically load credentials: @@ -21,51 +16,58 @@ class AmazonQ(LLM): Make sure the credentials / roles used have the required policies to access the Amazon Q service. """ - + region_name: Optional[str] = None """AWS region name. 
If not provided, will be extracted from environment.""" credentials: Optional[Any] = None """Amazon Q credentials used to instantiate the client if the client is not provided.""" - - client: Any = None + + client: Optional[Any] = None """Amazon Q client.""" application_id: str = None - """Amazon Q client.""" _last_response: Dict = None # Add this to store the full response """Store the full response from Amazon Q.""" parent_message_id: Optional[str] = None - """AWS region name. If not provided, will be extracted from environment.""" conversation_id: Optional[str] = None - """AWS region name. If not provided, will be extracted from environment.""" chat_mode: str = "RETRIEVAL_MODE" - """AWS region name. If not provided, will be extracted from environment.""" model_config = ConfigDict( extra="forbid", ) - def __init__(self, **kwargs: Any) -> None: - """Initialize the Amazon Q client.""" - super().__init__(**kwargs) + def __init__( + self, + region_name: Optional[str] = None, + credentials: Optional[Any] = None, + client: Optional[Any] = None, + application_id: str = None, + parent_message_id: Optional[str] = None, + conversation_id: Optional[str] = None, + chat_mode: str = "RETRIEVAL_MODE", + ): + self.region_name = region_name + self.credentials = credentials + self.client = client + self.validate_environment() + self.application_id = application_id + self.parent_message_id = parent_message_id + self.conversation_id = conversation_id + self.chat_mode = chat_mode def invoke( self, - prompt: str, - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, + input: str, ) -> str: """Call out to Amazon Q service. Args: - prompt: The prompt to pass into the model. - stop: Optional list of stop words to use when generating. + input: The prompt to pass into the model. Returns: The string generated by the model. @@ -73,21 +75,25 @@ def invoke( Example: .. 
code-block:: python - response = llm.invoke("Tell me a joke.") + model = AmazonQ( + credentials=your_credentials, + application_id=your_app_id + ) + response = model.invoke("Tell me a joke") """ try: # Prepare the request request = { 'applicationId': self.application_id, - 'userMessage': prompt, - 'chatMode':self.chat_mode, + 'userMessage': input, + 'chatMode': self.chat_mode, } - if self.conversation_id: - request.update({ - 'conversationId': self.conversation_id, - 'parentMessageId': self.parent_message_id, + if self.conversation_id: + request.update({ + 'conversationId': self.conversation_id, + 'parentMessageId': self.parent_message_id, }) - + # Call Amazon Q response = self.client.chat_sync(**request) self._last_response = response @@ -100,9 +106,9 @@ def invoke( except Exception as e: if "Prompt Length" in str(e): - logging.info(f"Prompt Length: {len(prompt)}") + logging.info(f"Prompt Length: {len(input)}") print(f"""Prompt: - {prompt}""") + {input}""") raise ValueError(f"Error raised by Amazon Q service: {e}") def get_last_response(self) -> Dict: @@ -111,24 +117,20 @@ def get_last_response(self) -> Dict: async def ainvoke( self, - prompt: str, - stop: Optional[List[str]] = None, - run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, - **kwargs: Any, + input: str, ) -> str: """Async call to Amazon Q service.""" - + def _execute_call(): - return self.invoke(prompt, stop=stop, **kwargs) + return self.invoke(input) # Run the synchronous call in a thread pool return await asyncio.get_running_loop().run_in_executor( None, _execute_call ) - @model_validator(mode="after") def validate_environment(self) -> Self: - """Dont do anything if client provided externally""" + """Don't do anything if client provided externally""" if self.client is not None: return self #If the client is not provided, and the user_id is not provided in the class constructor, throw an error saying one or the other needs to be provided @@ -160,4 +162,4 @@ def validate_environment(self) -> Self: "Could not import boto3 python package. " "Please install it with `pip install boto3`." 
) - return self \ No newline at end of file + return self From 49e7b4ca81a43e395ebe618a30965327d3ed8e09 Mon Sep 17 00:00:00 2001 From: Michael Chin Date: Thu, 23 Jan 2025 13:34:22 -0800 Subject: [PATCH 08/13] Address linter warning --- libs/aws/langchain_aws/runnables/q_business.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libs/aws/langchain_aws/runnables/q_business.py b/libs/aws/langchain_aws/runnables/q_business.py index 947c46b3..4565138e 100644 --- a/libs/aws/langchain_aws/runnables/q_business.py +++ b/libs/aws/langchain_aws/runnables/q_business.py @@ -1,8 +1,9 @@ -from typing import Any, Dict, Optional +import asyncio import logging +from typing import Any, Dict, Optional + from langchain_core.runnables import Runnable from pydantic import ConfigDict -import asyncio from typing_extensions import Self From 99ea5d9a8f279bc19eef2a6248fed9b1f4bbd922 Mon Sep 17 00:00:00 2001 From: Michael Chin Date: Thu, 23 Jan 2025 18:46:55 -0800 Subject: [PATCH 09/13] Apply suggestions from code review Co-authored-by: Piyush Jain --- libs/aws/langchain_aws/runnables/q_business.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/libs/aws/langchain_aws/runnables/q_business.py b/libs/aws/langchain_aws/runnables/q_business.py index 4565138e..0e579aa0 100644 --- a/libs/aws/langchain_aws/runnables/q_business.py +++ b/libs/aws/langchain_aws/runnables/q_business.py @@ -7,7 +7,10 @@ from typing_extensions import Self -class AmazonQ(Runnable): +from langchain_core._api.beta_decorator import beta + +@beta(message="This API is in beta and can change in future.") +class AmazonQ(Runnable[str, str]): """Amazon Q Runnable wrapper. To authenticate, the AWS client uses the following methods to @@ -54,8 +57,7 @@ def __init__( ): self.region_name = region_name self.credentials = credentials - self.client = client - self.validate_environment() + self.client = client or self.validate_environment() self.application_id = application_id self.parent_message_id = parent_message_id self.conversation_id = conversation_id @@ -132,8 +134,6 @@ def _execute_call(): def validate_environment(self) -> Self: """Don't do anything if client provided externally""" - if self.client is not None: - return self #If the client is not provided, and the user_id is not provided in the class constructor, throw an error saying one or the other needs to be provided if self.credentials is None: raise ValueError( @@ -146,10 +146,10 @@ def validate_environment(self) -> Self: try: if self.region_name is not None: - self.client = boto3.client('qbusiness', self.region_name, **self.credentials) + client = boto3.client('qbusiness', self.region_name, **self.credentials) else: # use default region - self.client = boto3.client('qbusiness', **self.credentials) + client = boto3.client('qbusiness', **self.credentials) except Exception as e: raise ValueError( @@ -163,4 +163,4 @@ def validate_environment(self) -> Self: "Could not import boto3 python package. " "Please install it with `pip install boto3`." 
) - return self + return client From 2f0834cf1e5f0a9ebb9cb893e9f17f37e645340d Mon Sep 17 00:00:00 2001 From: Michael Chin Date: Thu, 23 Jan 2025 19:55:57 -0800 Subject: [PATCH 10/13] Remove ainvoke override, add RunnableConfig to invoke --- .../aws/langchain_aws/runnables/q_business.py | 21 ++++--------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/libs/aws/langchain_aws/runnables/q_business.py b/libs/aws/langchain_aws/runnables/q_business.py index 0e579aa0..d1ad9e92 100644 --- a/libs/aws/langchain_aws/runnables/q_business.py +++ b/libs/aws/langchain_aws/runnables/q_business.py @@ -1,14 +1,13 @@ -import asyncio import logging from typing import Any, Dict, Optional +from langchain_core._api.beta_decorator import beta from langchain_core.runnables import Runnable +from langchain_core.runnables.config import RunnableConfig from pydantic import ConfigDict from typing_extensions import Self -from langchain_core._api.beta_decorator import beta - @beta(message="This API is in beta and can change in future.") class AmazonQ(Runnable[str, str]): """Amazon Q Runnable wrapper. @@ -66,6 +65,8 @@ def __init__( def invoke( self, input: str, + config: Optional[RunnableConfig] = None, + **kwargs: Any ) -> str: """Call out to Amazon Q service. @@ -118,20 +119,6 @@ def get_last_response(self) -> Dict: """Method to access the full response from the last call""" return self._last_response - async def ainvoke( - self, - input: str, - ) -> str: - """Async call to Amazon Q service.""" - - def _execute_call(): - return self.invoke(input) - - # Run the synchronous call in a thread pool - return await asyncio.get_running_loop().run_in_executor( - None, _execute_call - ) - def validate_environment(self) -> Self: """Don't do anything if client provided externally""" #If the client is not provided, and the user_id is not provided in the class constructor, throw an error saying one or the other needs to be provided From 802dfb6cc3fbf20c4651b9a6760a6995f278896e Mon Sep 17 00:00:00 2001 From: Tom Ron Date: Tue, 28 Jan 2025 22:22:40 +0000 Subject: [PATCH 11/13] Implemented logic to convert lang chain message array to formatted string for Amazon Q input --- libs/aws/langchain_aws/runnables/q_business.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/libs/aws/langchain_aws/runnables/q_business.py b/libs/aws/langchain_aws/runnables/q_business.py index d1ad9e92..d1acbcf6 100644 --- a/libs/aws/langchain_aws/runnables/q_business.py +++ b/libs/aws/langchain_aws/runnables/q_business.py @@ -64,7 +64,7 @@ def __init__( def invoke( self, - input: str, + input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any ) -> str: @@ -89,7 +89,7 @@ def invoke( # Prepare the request request = { 'applicationId': self.application_id, - 'userMessage': input, + 'userMessage': self.convert_langchain_messages_to_q_input(input), # Langchain's input comes in the form of an array of "messages". We must convert to a single string for Amazon Q's use 'chatMode': self.chat_mode, } if self.conversation_id: @@ -151,3 +151,12 @@ def validate_environment(self) -> Self: "Please install it with `pip install boto3`." ) return client + def convert_langchain_messages_to_q_input(self, input: Any) -> str: + # Messages must be of type human', 'user', 'ai', 'assistant', or 'system + # Instead of logically formulating a message. We will allow langchain users to have their messages + # Added line by line the way they ordered them in the chain. 
We will prefix the content with the type, + # Hopefully this will inform Amazon Q how each message in the chain should be interpreted + messagesToStringArray = [] + for message in input.to_messages(): # Returns List[BaseMessage] + messagesToStringArray.append(message.type + ": " + message.content) + return "\n".join(messagesToStringArray) From f894ed054280f85cad57d387bca7bf4e9d106425 Mon Sep 17 00:00:00 2001 From: Tom Ron Date: Tue, 28 Jan 2025 22:50:53 +0000 Subject: [PATCH 12/13] Added logic to also accept plain string as the input in the event runnable is not invoked with chain. --- libs/aws/langchain_aws/runnables/q_business.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/libs/aws/langchain_aws/runnables/q_business.py b/libs/aws/langchain_aws/runnables/q_business.py index d1acbcf6..5baeee2f 100644 --- a/libs/aws/langchain_aws/runnables/q_business.py +++ b/libs/aws/langchain_aws/runnables/q_business.py @@ -152,6 +152,9 @@ def validate_environment(self) -> Self: ) return client def convert_langchain_messages_to_q_input(self, input: Any) -> str: + #If it is just a string and not a ChatPromptTemplate collection just return string + if type(input) is str: + return input # Messages must be of type human', 'user', 'ai', 'assistant', or 'system # Instead of logically formulating a message. We will allow langchain users to have their messages # Added line by line the way they ordered them in the chain. We will prefix the content with the type, From d8a01b0c9ecf7fd25f39f0fcd4ed9bcc94394277 Mon Sep 17 00:00:00 2001 From: Tom Ron Date: Tue, 4 Feb 2025 20:09:03 +0000 Subject: [PATCH 13/13] Implemented changes based on this comment https://github.com/langchain-ai/langchain-aws/pull/301#discussion_r1933118375 --- .../aws/langchain_aws/runnables/q_business.py | 21 +++++++------------ 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/libs/aws/langchain_aws/runnables/q_business.py b/libs/aws/langchain_aws/runnables/q_business.py index 5baeee2f..3b44d460 100644 --- a/libs/aws/langchain_aws/runnables/q_business.py +++ b/libs/aws/langchain_aws/runnables/q_business.py @@ -1,15 +1,16 @@ import logging -from typing import Any, Dict, Optional +from typing import Any, Dict, Optional, Union from langchain_core._api.beta_decorator import beta from langchain_core.runnables import Runnable from langchain_core.runnables.config import RunnableConfig from pydantic import ConfigDict from typing_extensions import Self +from langchain_core.prompt_values import ChatPromptValue @beta(message="This API is in beta and can change in future.") -class AmazonQ(Runnable[str, str]): +class AmazonQ(Runnable[Union[str,ChatPromptValue], str]): """Amazon Q Runnable wrapper. To authenticate, the AWS client uses the following methods to @@ -64,7 +65,7 @@ def __init__( def invoke( self, - input: Any, + input: Union[str,ChatPromptValue], config: Optional[RunnableConfig] = None, **kwargs: Any ) -> str: @@ -86,7 +87,7 @@ def invoke( response = model.invoke("Tell me a joke") """ try: - # Prepare the request + # Prepare the request request = { 'applicationId': self.application_id, 'userMessage': self.convert_langchain_messages_to_q_input(input), # Langchain's input comes in the form of an array of "messages". We must convert to a single string for Amazon Q's use @@ -151,15 +152,9 @@ def validate_environment(self) -> Self: "Please install it with `pip install boto3`." 
) return client - def convert_langchain_messages_to_q_input(self, input: Any) -> str: + def convert_langchain_messages_to_q_input(self, input: Union[str,ChatPromptValue]) -> str: #If it is just a string and not a ChatPromptTemplate collection just return string if type(input) is str: return input - # Messages must be of type human', 'user', 'ai', 'assistant', or 'system - # Instead of logically formulating a message. We will allow langchain users to have their messages - # Added line by line the way they ordered them in the chain. We will prefix the content with the type, - # Hopefully this will inform Amazon Q how each message in the chain should be interpreted - messagesToStringArray = [] - for message in input.to_messages(): # Returns List[BaseMessage] - messagesToStringArray.append(message.type + ": " + message.content) - return "\n".join(messagesToStringArray) + return input.to_string() + \ No newline at end of file
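
For reference, below is a minimal usage sketch of the AmazonQ runnable as it stands after PATCH 13/13. It is not part of the patch series itself: the region, application ID, and questions are placeholders, and it assumes the boto3 session carries credentials permitted to call qbusiness:ChatSync.

    import boto3
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_aws.runnables import AmazonQ

    # Placeholder region and Amazon Q Business application ID.
    client = boto3.client("qbusiness", region_name="us-east-1")

    q = AmazonQ(
        client=client,
        application_id="<your-q-business-application-id>",
    )

    # Plain string input is passed through unchanged to the ChatSync API.
    print(q.invoke("Summarize our onboarding guide."))

    # Behind a prompt template, the runnable receives a ChatPromptValue and
    # flattens it to a single string via to_string() before calling chat_sync.
    prompt = ChatPromptTemplate.from_messages([("human", "{question}")])
    chain = prompt | q
    print(chain.invoke({"question": "What is the vacation policy?"}))

Because the runnable keeps conversation_id and parent_message_id as constructor arguments, multi-turn use would mean constructing a new instance (or updating those fields) from the values returned by get_last_response(); the sketch above covers only single-turn calls.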