From b1f2118764d37ba264ce1324cbb8371915e3c620 Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Wed, 5 Jun 2024 13:39:07 -0700 Subject: [PATCH] Removed `OpenAiCompletionPromptDriver` as it uses legacy APIs --- CHANGELOG.md | 1 + .../drivers/prompt-drivers.md | 32 ----- griptape/drivers/__init__.py | 4 - .../azure_openai_completion_prompt_driver.py | 41 ------- .../prompt/openai_completion_prompt_driver.py | 83 ------------- ...t_azure_openai_completion_prompt_driver.py | 75 ------------ .../test_openai_completion_prompt_driver.py | 112 ------------------ tests/utils/structure_tester.py | 11 -- 8 files changed, 1 insertion(+), 358 deletions(-) delete mode 100644 griptape/drivers/prompt/azure_openai_completion_prompt_driver.py delete mode 100644 griptape/drivers/prompt/openai_completion_prompt_driver.py delete mode 100644 tests/unit/drivers/prompt/test_azure_openai_completion_prompt_driver.py delete mode 100644 tests/unit/drivers/prompt/test_openai_completion_prompt_driver.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 0756ef35d..93d7b88ce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **BREAKING**: Removed `BedrockJurassicTokenizer`, use `SimpleTokenizer` instead. - **BREAKING**: Removed `BedrockLlamaTokenizer`, use `SimpleTokenizer` instead. - **BREAKING**: Removed `BedrockTitanTokenizer`, use `SimpleTokenizer` instead. +- **BREAKING**: Removed `OpenAiCompletionPromptDriver` as it uses the legacy [OpenAi Completions API](https://platform.openai.com/docs/api-reference/completions). - Updated `AmazonBedrockPromptDriver` to use [Converse API](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html). - `Structure.before_run()` now automatically resolves asymmetrically defined parent/child relationships using the new `Structure.resolve_relationships()`. 
- Updated `HuggingFaceHubPromptDriver` to use `transformers`'s `apply_chat_template`. diff --git a/docs/griptape-framework/drivers/prompt-drivers.md b/docs/griptape-framework/drivers/prompt-drivers.md index 859dcec03..befbbd13a 100644 --- a/docs/griptape-framework/drivers/prompt-drivers.md +++ b/docs/griptape-framework/drivers/prompt-drivers.md @@ -125,38 +125,6 @@ agent = Agent( agent.run("Artificial intelligence is a technology with great promise.") ``` -### Azure OpenAI Completion - -The [AzureOpenAiCompletionPromptDriver](../../reference/griptape/drivers/prompt/azure_openai_completion_prompt_driver.md) connects to Azure OpenAI [Text Completion](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference) API. - -```python -import os -from griptape.structures import Agent -from griptape.drivers import AzureOpenAiCompletionPromptDriver -from griptape.config import StructureConfig - -agent = Agent( - config=StructureConfig( - prompt_driver=AzureOpenAiCompletionPromptDriver( - api_key=os.environ["AZURE_OPENAI_API_KEY_1"], - model="text-davinci-003", - azure_deployment=os.environ["AZURE_OPENAI_DAVINCI_DEPLOYMENT_ID"], - azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT_1"], - temperature=1 - ) - ) -) - -agent.run( - """ - Write a product launch email for new AI-powered headphones that are priced at $79.99 and available at Best Buy, Target and Amazon.com. The target audience is tech-savvy music lovers and the tone is friendly and exciting. - - 1. What should be the subject line of the email? - 2. What should be the body of the email? - """ -) -``` - ### Cohere The [CoherePromptDriver](../../reference/griptape/drivers/prompt/cohere_prompt_driver.md) connects to the Cohere [Generate](https://docs.cohere.ai/reference/generate) API. 
diff --git a/griptape/drivers/__init__.py b/griptape/drivers/__init__.py index 02a3882fb..f9f5302ea 100644 --- a/griptape/drivers/__init__.py +++ b/griptape/drivers/__init__.py @@ -1,8 +1,6 @@ from .prompt.base_prompt_driver import BasePromptDriver from .prompt.openai_chat_prompt_driver import OpenAiChatPromptDriver -from .prompt.openai_completion_prompt_driver import OpenAiCompletionPromptDriver from .prompt.azure_openai_chat_prompt_driver import AzureOpenAiChatPromptDriver -from .prompt.azure_openai_completion_prompt_driver import AzureOpenAiCompletionPromptDriver from .prompt.cohere_prompt_driver import CoherePromptDriver from .prompt.huggingface_pipeline_prompt_driver import HuggingFacePipelinePromptDriver from .prompt.huggingface_hub_prompt_driver import HuggingFaceHubPromptDriver @@ -112,9 +110,7 @@ __all__ = [ "BasePromptDriver", "OpenAiChatPromptDriver", - "OpenAiCompletionPromptDriver", "AzureOpenAiChatPromptDriver", - "AzureOpenAiCompletionPromptDriver", "CoherePromptDriver", "HuggingFacePipelinePromptDriver", "HuggingFaceHubPromptDriver", diff --git a/griptape/drivers/prompt/azure_openai_completion_prompt_driver.py b/griptape/drivers/prompt/azure_openai_completion_prompt_driver.py deleted file mode 100644 index 4ff2a4902..000000000 --- a/griptape/drivers/prompt/azure_openai_completion_prompt_driver.py +++ /dev/null @@ -1,41 +0,0 @@ -from typing import Callable, Optional -from attrs import define, field, Factory -from griptape.drivers import OpenAiCompletionPromptDriver -import openai - - -@define -class AzureOpenAiCompletionPromptDriver(OpenAiCompletionPromptDriver): - """ - Attributes: - azure_deployment: An optional Azure OpenAi deployment id. Defaults to the model name. - azure_endpoint: An Azure OpenAi endpoint. - azure_ad_token: An optional Azure Active Directory token. - azure_ad_token_provider: An optional Azure Active Directory token provider. - api_version: An Azure OpenAi API version. - client: An `openai.AzureOpenAI` client. 
- """ - - azure_deployment: str = field( - kw_only=True, default=Factory(lambda self: self.model, takes_self=True), metadata={"serializable": True} - ) - azure_endpoint: str = field(kw_only=True, metadata={"serializable": True}) - azure_ad_token: Optional[str] = field(kw_only=True, default=None, metadata={"serializable": False}) - azure_ad_token_provider: Optional[Callable[[], str]] = field( - kw_only=True, default=None, metadata={"serializable": False} - ) - api_version: str = field(default="2023-05-15", kw_only=True, metadata={"serializable": True}) - client: openai.AzureOpenAI = field( - default=Factory( - lambda self: openai.AzureOpenAI( - organization=self.organization, - api_key=self.api_key, - api_version=self.api_version, - azure_endpoint=self.azure_endpoint, - azure_deployment=self.azure_deployment, - azure_ad_token=self.azure_ad_token, - azure_ad_token_provider=self.azure_ad_token_provider, - ), - takes_self=True, - ) - ) diff --git a/griptape/drivers/prompt/openai_completion_prompt_driver.py b/griptape/drivers/prompt/openai_completion_prompt_driver.py deleted file mode 100644 index 1a738a487..000000000 --- a/griptape/drivers/prompt/openai_completion_prompt_driver.py +++ /dev/null @@ -1,83 +0,0 @@ -from typing import Optional -from collections.abc import Iterator -from attrs import define, field, Factory -from griptape.artifacts import TextArtifact -from griptape.utils import PromptStack -from griptape.drivers import BasePromptDriver -from griptape.tokenizers import OpenAiTokenizer -import openai - - -@define -class OpenAiCompletionPromptDriver(BasePromptDriver): - """ - Attributes: - base_url: An optional OpenAi API URL. - api_key: An optional OpenAi API key. If not provided, the `OPENAI_API_KEY` environment variable will be used. - organization: An optional OpenAI organization. If not provided, the `OPENAI_ORG_ID` environment variable will be used. - client: An `openai.OpenAI` client. - model: An OpenAI model name. - tokenizer: An `OpenAiTokenizer`. 
- user: A user id. Can be used to track requests by user. - ignored_exception_types: An optional tuple of exception types to ignore. Defaults to OpenAI's known exception types. - """ - - base_url: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) - api_key: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": False}) - organization: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) - client: openai.OpenAI = field( - default=Factory( - lambda self: openai.OpenAI(api_key=self.api_key, base_url=self.base_url, organization=self.organization), - takes_self=True, - ) - ) - model: str = field(kw_only=True, metadata={"serializable": True}) - tokenizer: OpenAiTokenizer = field( - default=Factory(lambda self: OpenAiTokenizer(model=self.model), takes_self=True), kw_only=True - ) - user: str = field(default="", kw_only=True, metadata={"serializable": True}) - ignored_exception_types: tuple[type[Exception], ...] = field( - default=Factory( - lambda: ( - openai.BadRequestError, - openai.AuthenticationError, - openai.PermissionDeniedError, - openai.NotFoundError, - openai.ConflictError, - openai.UnprocessableEntityError, - ) - ), - kw_only=True, - ) - - def try_run(self, prompt_stack: PromptStack) -> TextArtifact: - result = self.client.completions.create(**self._base_params(prompt_stack)) - - if len(result.choices) == 1: - return TextArtifact(value=result.choices[0].text.strip()) - else: - raise Exception("completion with more than one choice is not supported yet") - - def try_stream(self, prompt_stack: PromptStack) -> Iterator[TextArtifact]: - result = self.client.completions.create(**self._base_params(prompt_stack), stream=True) - - for chunk in result: - if len(chunk.choices) == 1: - choice = chunk.choices[0] - delta_content = choice.text - yield TextArtifact(value=delta_content) - - else: - raise Exception("completion with more than one choice is not supported yet") - - def 
_base_params(self, prompt_stack: PromptStack) -> dict: - prompt = self.prompt_stack_to_string(prompt_stack) - - return { - "model": self.model, - "max_tokens": self.max_output_tokens(prompt), - "temperature": self.temperature, - "stop": self.tokenizer.stop_sequences, - "user": self.user, - "prompt": prompt, - } diff --git a/tests/unit/drivers/prompt/test_azure_openai_completion_prompt_driver.py b/tests/unit/drivers/prompt/test_azure_openai_completion_prompt_driver.py deleted file mode 100644 index 65758843a..000000000 --- a/tests/unit/drivers/prompt/test_azure_openai_completion_prompt_driver.py +++ /dev/null @@ -1,75 +0,0 @@ -import pytest -from unittest.mock import Mock -from griptape.drivers import AzureOpenAiCompletionPromptDriver -from tests.unit.drivers.prompt.test_openai_completion_prompt_driver import TestOpenAiCompletionPromptDriverFixtureMixin -from unittest.mock import ANY - - -class TestAzureOpenAiCompletionPromptDriver(TestOpenAiCompletionPromptDriverFixtureMixin): - @pytest.fixture - def mock_completion_create(self, mocker): - mock_chat_create = mocker.patch("openai.AzureOpenAI").return_value.completions.create - mock_choice = Mock() - mock_choice.text = "model-output" - mock_chat_create.return_value.choices = [mock_choice] - return mock_chat_create - - @pytest.fixture - def mock_completion_stream_create(self, mocker): - mock_chat_create = mocker.patch("openai.AzureOpenAI").return_value.completions.create - mock_chunk = Mock() - mock_choice = Mock() - mock_choice.text = "model-output" - mock_chunk.choices = [mock_choice] - mock_chat_create.return_value = iter([mock_chunk]) - return mock_chat_create - - def test_init(self): - assert AzureOpenAiCompletionPromptDriver( - azure_endpoint="endpoint", azure_deployment="deployment", model="text-davinci-003" - ) - assert ( - AzureOpenAiCompletionPromptDriver(azure_endpoint="endpoint", model="text-davinci-003").azure_deployment - == "text-davinci-003" - ) - - def test_try_run(self, mock_completion_create, 
prompt_stack, prompt): - # Given - driver = AzureOpenAiCompletionPromptDriver( - azure_endpoint="endpoint", azure_deployment="deployment", model="text-davinci-003" - ) - - # When - text_artifact = driver.try_run(prompt_stack) - - # Then - mock_completion_create.assert_called_once_with( - model=driver.model, - max_tokens=ANY, - temperature=driver.temperature, - stop=driver.tokenizer.stop_sequences, - user=driver.user, - prompt=prompt, - ) - assert text_artifact.value == "model-output" - - def test_try_stream_run(self, mock_completion_stream_create, prompt_stack, prompt): - # Given - driver = AzureOpenAiCompletionPromptDriver( - azure_endpoint="endpoint", azure_deployment="deployment", model="text-davinci-003", stream=True - ) - - # When - text_artifact = next(driver.try_stream(prompt_stack)) - - # Then - mock_completion_stream_create.assert_called_once_with( - model=driver.model, - max_tokens=ANY, - temperature=driver.temperature, - stop=driver.tokenizer.stop_sequences, - user=driver.user, - stream=True, - prompt=prompt, - ) - assert text_artifact.value == "model-output" diff --git a/tests/unit/drivers/prompt/test_openai_completion_prompt_driver.py b/tests/unit/drivers/prompt/test_openai_completion_prompt_driver.py deleted file mode 100644 index 66998c261..000000000 --- a/tests/unit/drivers/prompt/test_openai_completion_prompt_driver.py +++ /dev/null @@ -1,112 +0,0 @@ -from griptape.drivers import OpenAiCompletionPromptDriver -from griptape.utils import PromptStack -from unittest.mock import ANY, Mock -from griptape.tokenizers import OpenAiTokenizer -import pytest - - -class TestOpenAiCompletionPromptDriverFixtureMixin: - @pytest.fixture - def mock_completion_create(self, mocker): - mock_chat_create = mocker.patch("openai.OpenAI").return_value.completions.create - mock_choice = Mock() - mock_choice.text = "model-output" - mock_chat_create.return_value.choices = [mock_choice] - return mock_chat_create - - @pytest.fixture - def mock_completion_stream_create(self, 
mocker): - mock_chat_create = mocker.patch("openai.OpenAI").return_value.completions.create - mock_chunk = Mock() - mock_choice = Mock() - mock_choice.text = "model-output" - mock_chunk.choices = [mock_choice] - mock_chat_create.return_value = iter([mock_chunk]) - return mock_chat_create - - @pytest.fixture - def prompt_stack(self): - prompt_stack = PromptStack() - prompt_stack.add_generic_input("generic-input") - prompt_stack.add_system_input("system-input") - prompt_stack.add_user_input("user-input") - prompt_stack.add_assistant_input("assistant-input") - return prompt_stack - - @pytest.fixture - def prompt(self): - return "".join( - [ - "generic-input\n\n", - "system-input\n\n", - "User: user-input\n\n", - "Assistant: assistant-input\n\n", - "Assistant:", - ] - ) - - -class TestOpenAiCompletionPromptDriver(TestOpenAiCompletionPromptDriverFixtureMixin): - def test_init(self): - assert OpenAiCompletionPromptDriver(model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL) - - def test_try_run(self, mock_completion_create, prompt_stack, prompt): - # Given - driver = OpenAiCompletionPromptDriver(model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL) - - # When - text_artifact = driver.try_run(prompt_stack) - - # Then - mock_completion_create.assert_called_once_with( - model=driver.model, - max_tokens=ANY, - temperature=driver.temperature, - stop=driver.tokenizer.stop_sequences, - user=driver.user, - prompt=prompt, - ) - assert text_artifact.value == "model-output" - - def test_try_stream_run(self, mock_completion_stream_create, prompt_stack, prompt): - # Given - driver = OpenAiCompletionPromptDriver(model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL, stream=True) - - # When - text_artifact = next(driver.try_stream(prompt_stack)) - - # Then - mock_completion_stream_create.assert_called_once_with( - model=driver.model, - max_tokens=ANY, - temperature=driver.temperature, - stop=driver.tokenizer.stop_sequences, - user=driver.user, - stream=True, - prompt=prompt, - ) - 
assert text_artifact.value == "model-output" - - def test_try_run_throws_when_prompt_stack_is_string(self): - # Given - driver = OpenAiCompletionPromptDriver(model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL) - - # When - with pytest.raises(Exception) as e: - driver.try_run("prompt-stack") # pyright: ignore - - # Then - assert e.value.args[0] == "'str' object has no attribute 'inputs'" - - @pytest.mark.parametrize("choices", [[], [1, 2]]) - def test_try_run_throws_when_multiple_choices_returned(self, choices, mock_completion_create, prompt_stack): - # Given - driver = OpenAiCompletionPromptDriver(model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL) - mock_completion_create.return_value.choices = choices - - # When - with pytest.raises(Exception) as e: - driver.try_run(prompt_stack) - - # Then - e.value.args[0] == "Completion with more than one choice is not supported yet." diff --git a/tests/utils/structure_tester.py b/tests/utils/structure_tester.py index 592843279..d21d52c93 100644 --- a/tests/utils/structure_tester.py +++ b/tests/utils/structure_tester.py @@ -16,7 +16,6 @@ AnthropicPromptDriver, CoherePromptDriver, OpenAiChatPromptDriver, - OpenAiCompletionPromptDriver, AzureOpenAiChatPromptDriver, AmazonSageMakerPromptDriver, SageMakerLlamaPromptModelDriver, @@ -49,12 +48,6 @@ class TesterPromptDriverOption: prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo-1106", api_key=os.environ["OPENAI_API_KEY"]), enabled=True, ), - "OPENAI_CHAT_35_TURBO_INSTRUCT": TesterPromptDriverOption( - prompt_driver=OpenAiCompletionPromptDriver( - model="gpt-3.5-turbo-instruct", api_key=os.environ["OPENAI_API_KEY"] - ), - enabled=True, - ), "OPENAI_CHAT_4": TesterPromptDriverOption( prompt_driver=OpenAiChatPromptDriver(model="gpt-4", api_key=os.environ["OPENAI_API_KEY"]), enabled=True ), @@ -65,10 +58,6 @@ class TesterPromptDriverOption: prompt_driver=OpenAiChatPromptDriver(model="gpt-4-1106-preview", api_key=os.environ["OPENAI_API_KEY"]), enabled=True, ), - 
"OPENAI_COMPLETION_DAVINCI": TesterPromptDriverOption( - prompt_driver=OpenAiCompletionPromptDriver(api_key=os.environ["OPENAI_API_KEY"], model="text-davinci-003"), - enabled=True, - ), "AZURE_CHAT_35_TURBO": TesterPromptDriverOption( prompt_driver=AzureOpenAiChatPromptDriver( api_key=os.environ["AZURE_OPENAI_API_KEY_1"],