diff --git a/docs/griptape-framework/structures/src/task_memory_6.py b/docs/griptape-framework/structures/src/task_memory_6.py
index 371b3c821..006bf8769 100644
--- a/docs/griptape-framework/structures/src/task_memory_6.py
+++ b/docs/griptape-framework/structures/src/task_memory_6.py
@@ -1,13 +1,11 @@
-from griptape.artifacts import TextArtifact
 from griptape.configs import Defaults
 from griptape.configs.drivers import OpenAiDriversConfig
 from griptape.drivers import (
+    AmazonBedrockPromptDriver,
     LocalVectorStoreDriver,
     OpenAiChatPromptDriver,
     OpenAiEmbeddingDriver,
 )
-from griptape.memory import TaskMemory
-from griptape.memory.task.storage import TextArtifactStorage
 from griptape.structures import Agent
 from griptape.tools import FileManagerTool, QueryTool, WebScraperTool
 
@@ -15,23 +13,15 @@
     prompt_driver=OpenAiChatPromptDriver(model="gpt-4"),
 )
 
-Defaults.drivers_config = OpenAiDriversConfig(
-    prompt_driver=OpenAiChatPromptDriver(model="gpt-4"),
-)
-
 vector_store_driver = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver())
 
 agent = Agent(
-    task_memory=TaskMemory(
-        artifact_storages={
-            TextArtifact: TextArtifactStorage(
-                vector_store_driver=vector_store_driver,
-            )
-        }
-    ),
     tools=[
         WebScraperTool(off_prompt=True),
-        QueryTool(off_prompt=True),
+        QueryTool(
+            off_prompt=True,
+            prompt_driver=AmazonBedrockPromptDriver(model="anthropic.claude-3-haiku-20240307-v1:0"),
+        ),
         FileManagerTool(off_prompt=True),
     ],
 )
diff --git a/docs/griptape-framework/structures/task-memory.md b/docs/griptape-framework/structures/task-memory.md
index 81334b1cb..a3fc04dc5 100644
--- a/docs/griptape-framework/structures/task-memory.md
+++ b/docs/griptape-framework/structures/task-memory.md
@@ -198,8 +198,8 @@ And now we get the expected output:
 
 Because Task Memory splits up the storage and retrieval of data, you can use different models for each step.
 
-Here is an example where we use GPT-4 to orchestrate the Tools and store the data in Task Memory, and Amazon Bedrock's Titan model to query the raw content.
-In this example, GPT-4 _never_ sees the contents of the page, only that it was stored in Task Memory. Even the query results generated by the Titan model are stored in Task Memory so that the `FileManagerTool` can save the results to disk without GPT-4 ever seeing them.
+Here is an example where we use GPT-4 to orchestrate the Tools and store the data in Task Memory, and Anthropic's Claude 3 Haiku model to query the raw content.
+In this example, GPT-4 _never_ sees the contents of the page, only that it was stored in Task Memory. Even the query results generated by the Haiku model are stored in Task Memory so that the `FileManagerTool` can save the results to disk without GPT-4 ever seeing them.
 
 ```python
 --8<-- "docs/griptape-framework/structures/src/task_memory_6.py"
diff --git a/griptape/tools/query/tool.py b/griptape/tools/query/tool.py
index 3bc954239..0089970e9 100644
--- a/griptape/tools/query/tool.py
+++ b/griptape/tools/query/tool.py
@@ -1,5 +1,7 @@
 from __future__ import annotations
 
+from typing import TYPE_CHECKING
+
 from attrs import Factory, define, field
 from schema import Literal, Or, Schema
 
@@ -15,19 +17,22 @@
 from griptape.tools.base_tool import BaseTool
 from griptape.utils.decorators import activity
 
+if TYPE_CHECKING:
+    from griptape.drivers.prompt.base_prompt_driver import BasePromptDriver
+
 
 @define(kw_only=True)
 class QueryTool(BaseTool, RuleMixin):
     """Tool for performing a query against data."""
 
+    prompt_driver: BasePromptDriver = field(default=Factory(lambda: Defaults.drivers_config.prompt_driver))
+
     _rag_engine: RagEngine = field(
         default=Factory(
             lambda self: RagEngine(
                 response_stage=ResponseRagStage(
                     response_modules=[
-                        PromptResponseRagModule(
-                            prompt_driver=Defaults.drivers_config.prompt_driver, rulesets=self.rulesets
-                        )
+                        PromptResponseRagModule(prompt_driver=self.prompt_driver, rulesets=self.rulesets)
                     ],
                 ),
             ),
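
Usage note (not part of the diff): the new `prompt_driver` field lets callers pick the query model per Tool instead of overriding `Defaults.drivers_config`. A minimal sketch under that assumption, reusing only the names visible in the diff above (the run prompt is illustrative):

```python
from griptape.drivers import AmazonBedrockPromptDriver
from griptape.structures import Agent
from griptape.tools import QueryTool, WebScraperTool

# The Agent orchestrates with the default prompt driver from
# Defaults.drivers_config; only the QueryTool's query step runs on Haiku.
agent = Agent(
    tools=[
        WebScraperTool(off_prompt=True),
        QueryTool(
            off_prompt=True,
            prompt_driver=AmazonBedrockPromptDriver(model="anthropic.claude-3-haiku-20240307-v1:0"),
        ),
    ],
)

agent.run("Scrape https://griptape.ai and query the stored content for what Griptape is.")
```

When omitted, `prompt_driver` falls back to `Defaults.drivers_config.prompt_driver` via the `attrs` `Factory`, so existing `QueryTool(off_prompt=True)` call sites keep their behavior. The `BasePromptDriver` import is guarded by `TYPE_CHECKING` because, with `from __future__ import annotations` in effect, the name is only needed by static type checkers, not at runtime.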