diff --git a/CHANGELOG.md b/CHANGELOG.md index bfc0b2f00..a17730852 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Rulesets can now be serialized and deserialized. - `ToolkitTask` now serializes its `tools` field. - `PromptTask.prompt_driver` is now serialized. +- `PromptTask` can now do everything a `ToolkitTask` can do. ### Fixed @@ -32,6 +33,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Parsing `ActionCallDeltaMessageContent`s with empty string `partial_input`s. - `Stream` util not properly propagating thread contextvars. +### Deprecated + +- `ToolkitTask`. `PromptTask` is a drop-in replacement. + ## [1.0.0] - 2024-12-09 ### Added diff --git a/README.md b/README.md index e00337e65..766a49d60 100644 --- a/README.md +++ b/README.md @@ -111,7 +111,7 @@ agent.run("https://griptape.ai", "griptape.txt") And here is the output: ``` -[08/12/24 14:48:15] INFO ToolkitTask c90d263ec69046e8b30323c131ae4ba0 +[08/12/24 14:48:15] INFO PromptTask c90d263ec69046e8b30323c131ae4ba0 Input: Load https://griptape.ai, summarize it, and store it in a file called griptape.txt. [08/12/24 14:48:16] INFO Subtask ebe23832cbe2464fb9ecde9fcee7c30f Actions: [ @@ -166,7 +166,7 @@ And here is the output: ] INFO Subtask c233853450fb4fd6a3e9c04c52b33bf6 Response: Successfully saved memory artifacts to disk -[08/12/24 14:48:23] INFO ToolkitTask c90d263ec69046e8b30323c131ae4ba0 +[08/12/24 14:48:23] INFO PromptTask c90d263ec69046e8b30323c131ae4ba0 Output: The content from https://griptape.ai has been summarized and stored in a file called `griptape.txt`. 
``` @@ -177,8 +177,8 @@ The important thing to note here is that no matter how big the webpage is it can In the above example, we set [off_prompt](https://docs.griptape.ai/stable/griptape-framework/structures/task-memory.md#off-prompt) to `True`, which means that the LLM can never see the data it manipulates, but can send it to other Tools. > [!IMPORTANT] -> This example uses Griptape's [ToolkitTask](https://docs.griptape.ai/stable/griptape-framework/structures/tasks/#toolkit-task), which requires a highly capable LLM to function correctly. By default, Griptape uses the [OpenAiChatPromptDriver](https://docs.griptape.ai/stable/griptape-framework/drivers/prompt-drivers/#openai-chat); for another powerful LLM try swapping to the [AnthropicPromptDriver](https://docs.griptape.ai/stable/griptape-framework/drivers/prompt-drivers/#anthropic)! -> If you're using a less powerful LLM, consider using the [ToolTask](https://docs.griptape.ai/stable/griptape-framework/structures/tasks/#tool-task) instead, as the `ToolkitTask` might not work properly or at all. +> This example uses Griptape's [PromptTask](https://docs.griptape.ai/stable/griptape-framework/structures/tasks/#prompt-task) with `tools`, which requires a highly capable LLM to function correctly. By default, Griptape uses the [OpenAiChatPromptDriver](https://docs.griptape.ai/stable/griptape-framework/drivers/prompt-drivers/#openai-chat); for another powerful LLM try swapping to the [AnthropicPromptDriver](https://docs.griptape.ai/stable/griptape-framework/drivers/prompt-drivers/#anthropic)! +> If you're using a less powerful LLM, consider using the [ToolTask](https://docs.griptape.ai/stable/griptape-framework/structures/tasks/#tool-task) instead, as the `PromptTask` with `tools` might not work properly or at all. [Check out our docs](https://docs.griptape.ai/stable/griptape-framework/drivers/prompt-drivers/) to learn more about how to use Griptape with other LLM providers like Anthropic, Claude, Hugging Face, and Azure. 
diff --git a/docs/griptape-framework/drivers/web-search-drivers.md b/docs/griptape-framework/drivers/web-search-drivers.md index 6e950dba0..a0a74618b 100644 --- a/docs/griptape-framework/drivers/web-search-drivers.md +++ b/docs/griptape-framework/drivers/web-search-drivers.md @@ -16,7 +16,7 @@ You can use Web Search Drivers with [Structures](../structures/agents.md): ``` ``` -ToolkitTask 45a53f1024494baab41a1f10a67017b1 +PromptTask 45a53f1024494baab41a1f10a67017b1 Output: Here are some websites with information about AI frameworks: diff --git a/docs/griptape-framework/index.md b/docs/griptape-framework/index.md index de6206d22..39cfb8996 100644 --- a/docs/griptape-framework/index.md +++ b/docs/griptape-framework/index.md @@ -106,7 +106,7 @@ Agents on their own are fun, but let's add some capabilities to them using Gript Here is the chain of thought from the Agent. Notice where it realizes it can use the tool you just injected to do the calculation.[^1] ``` -[07/23/24 10:47:38] INFO ToolkitTask 6a51060d1fb74e57840a91aa319f26dc +[07/23/24 10:47:38] INFO PromptTask 6a51060d1fb74e57840a91aa319f26dc Input: what is 7^12 [07/23/24 10:47:39] INFO Subtask 0c984616fd2345a7b48a0b0d692daa3c Actions: [ @@ -123,7 +123,7 @@ Here is the chain of thought from the Agent. 
Notice where it realizes it can use ] INFO Subtask 0c984616fd2345a7b48a0b0d692daa3c Response: 13841287201 -[07/23/24 10:47:40] INFO ToolkitTask 6a51060d1fb74e57840a91aa319f26dc +[07/23/24 10:47:40] INFO PromptTask 6a51060d1fb74e57840a91aa319f26dc Output: 13,841,287,201 Answer: 13,841,287,201 ``` @@ -137,7 +137,7 @@ Agents are great for getting started, but they are intentionally limited to a si ``` ``` -[08/12/24 14:50:28] INFO ToolkitTask 19dcf6020968468a91aa8a93c2a3f645 +[08/12/24 14:50:28] INFO PromptTask 19dcf6020968468a91aa8a93c2a3f645 Input: Load https://www.griptape.ai, summarize it, and store it in griptape.txt [08/12/24 14:50:30] INFO Subtask a685799379c5421b91768353fc219939 Actions: [ @@ -202,7 +202,7 @@ Agents are great for getting started, but they are intentionally limited to a si ] INFO Subtask aaaeca1a089844d4915d065deb3c00cf Response: Successfully saved file -[08/12/24 14:50:39] INFO ToolkitTask 19dcf6020968468a91aa8a93c2a3f645 +[08/12/24 14:50:39] INFO PromptTask 19dcf6020968468a91aa8a93c2a3f645 Output: The content from https://www.griptape.ai has been summarized and stored in griptape.txt. INFO PromptTask dbbb38f144f445db896dc12854f17ad3 Input: Say the following in spanish: The content from https://www.griptape.ai has been summarized and stored in griptape.txt. 
diff --git a/docs/griptape-framework/misc/src/events_3.py b/docs/griptape-framework/misc/src/events_3.py index beacf814a..a17130f94 100644 --- a/docs/griptape-framework/misc/src/events_3.py +++ b/docs/griptape-framework/misc/src/events_3.py @@ -1,7 +1,7 @@ from griptape.drivers import OpenAiChatPromptDriver from griptape.events import BaseChunkEvent, EventBus, EventListener from griptape.structures import Pipeline -from griptape.tasks import ToolkitTask +from griptape.tasks import PromptTask from griptape.tools import PromptSummaryTool, WebScraperTool EventBus.add_event_listeners( @@ -15,7 +15,7 @@ pipeline = Pipeline() pipeline.add_tasks( - ToolkitTask( + PromptTask( "Based on https://griptape.ai, tell me what griptape is.", prompt_driver=OpenAiChatPromptDriver(model="gpt-4o", stream=True), tools=[WebScraperTool(off_prompt=True), PromptSummaryTool(off_prompt=False)], diff --git a/docs/griptape-framework/misc/src/events_chunk_stream.py b/docs/griptape-framework/misc/src/events_chunk_stream.py index 3ab5517f4..087c19f84 100644 --- a/docs/griptape-framework/misc/src/events_chunk_stream.py +++ b/docs/griptape-framework/misc/src/events_chunk_stream.py @@ -1,7 +1,7 @@ from griptape.drivers import OpenAiChatPromptDriver from griptape.events import ActionChunkEvent, EventBus, EventListener, TextChunkEvent from griptape.structures import Pipeline -from griptape.tasks import ToolkitTask +from griptape.tasks import PromptTask from griptape.tools import PromptSummaryTool, WebScraperTool EventBus.add_event_listeners( @@ -19,7 +19,7 @@ pipeline = Pipeline() pipeline.add_tasks( - ToolkitTask( + PromptTask( "Based on https://griptape.ai, tell me what griptape is.", prompt_driver=OpenAiChatPromptDriver(model="gpt-4o", stream=True), tools=[WebScraperTool(off_prompt=True), PromptSummaryTool(off_prompt=False)], diff --git a/docs/griptape-framework/src/index_4.py b/docs/griptape-framework/src/index_4.py index 50465f99e..7facd0e18 100644 --- a/docs/griptape-framework/src/index_4.py +++ 
b/docs/griptape-framework/src/index_4.py @@ -1,6 +1,6 @@ from griptape.memory.structure import ConversationMemory from griptape.structures import Pipeline -from griptape.tasks import PromptTask, ToolkitTask +from griptape.tasks import PromptTask from griptape.tools import FileManagerTool, PromptSummaryTool, WebScraperTool # Pipelines represent sequences of tasks. @@ -8,7 +8,7 @@ pipeline.add_tasks( # Load up the first argument from `pipeline.run`. - ToolkitTask( + PromptTask( "{{ args[0] }}", # Add tools for web scraping, and file management tools=[WebScraperTool(off_prompt=True), FileManagerTool(off_prompt=True), PromptSummaryTool(off_prompt=False)], diff --git a/docs/griptape-framework/structures/agents.md b/docs/griptape-framework/structures/agents.md index a337a476e..147e64be0 100644 --- a/docs/griptape-framework/structures/agents.md +++ b/docs/griptape-framework/structures/agents.md @@ -7,21 +7,33 @@ search: An [Agent](../../reference/griptape/structures/agent.md) is the quickest way to get started with Griptape. Agents take in [tools](../../reference/griptape/structures/agent.md#griptape.structures.agent.Agent.tools) and [input](../../reference/griptape/structures/agent.md#griptape.structures.agent.Agent.input) -directly, which the agent uses to dynamically determine whether to use a [Prompt Task](./tasks.md#prompt-task) or [Toolkit Task](./tasks.md#toolkit-task). - -If [tools](../../reference/griptape/structures/agent.md#griptape.structures.agent.Agent.tools) are passed provided to the Agent, a [Toolkit Task](./tasks.md#toolkit-task) will be used. If no [tools](../../reference/griptape/structures/agent.md#griptape.structures.agent.Agent.tools) -are provided, a [Prompt Task](./tasks.md#prompt-task) will be used. +directly, which the agent uses to add a [Prompt Task](./tasks.md#prompt-task). 
You can access the final output of the Agent by using the [output](../../reference/griptape/structures/structure.md#griptape.structures.structure.Structure.output) attribute. -## Toolkit Task Agent +### Agent Input + +```python +--8<-- "docs/griptape-framework/structures/src/agents_2.py" +``` + +``` +[09/08/23 10:10:24] INFO PromptTask e70fb08090b24b91a9307fa83479e851 + Input: Write me a haiku about Skateboards and Programming +[09/08/23 10:10:28] INFO PromptTask e70fb08090b24b91a9307fa83479e851 + Output: Code on wheels in flight, + Skateboards meet algorithms bright, + In binary, we ignite. +``` + +### Agent Tools ```python --8<-- "docs/griptape-framework/structures/src/agents_1.py" ``` ``` -[07/23/24 10:53:41] INFO ToolkitTask 487db777bc014193ba90b061451b69a6 +[07/23/24 10:53:41] INFO PromptTask 487db777bc014193ba90b061451b69a6 Input: Calculate the following: what's 13^7? [07/23/24 10:53:42] INFO Subtask 126cefa3ac5347b88495e25af52f3268 Actions: [ @@ -38,22 +50,7 @@ You can access the final output of the Agent by using the [output](../../referen ] INFO Subtask 126cefa3ac5347b88495e25af52f3268 Response: 62748517 -[07/23/24 10:53:43] INFO ToolkitTask 487db777bc014193ba90b061451b69a6 +[07/23/24 10:53:43] INFO PromptTask 487db777bc014193ba90b061451b69a6 Output: 62,748,517 Answer: 62,748,517 ``` - -## Prompt Task Agent - -```python ---8<-- "docs/griptape-framework/structures/src/agents_2.py" -``` - -``` -[09/08/23 10:10:24] INFO PromptTask e70fb08090b24b91a9307fa83479e851 - Input: Write me a haiku about Skateboards and Programming -[09/08/23 10:10:28] INFO PromptTask e70fb08090b24b91a9307fa83479e851 - Output: Code on wheels in flight, - Skateboards meet algorithms bright, - In binary, we ignite. 
-``` diff --git a/docs/griptape-framework/structures/src/agents_1.py b/docs/griptape-framework/structures/src/agents_1.py index 9ce4aec22..decec439f 100644 --- a/docs/griptape-framework/structures/src/agents_1.py +++ b/docs/griptape-framework/structures/src/agents_1.py @@ -1,7 +1,7 @@ from griptape.structures import Agent -from griptape.tools import CalculatorTool -agent = Agent(input="Calculate the following: {{ args[0] }}", tools=[CalculatorTool()]) +agent = Agent( + input="Write me a {{ args[0] }} about {{ args[1] }} and {{ args[2] }}", +) -agent.run("what's 13^7?") -print("Answer:", agent.output) +agent.run("Haiku", "Skateboards", "Programming") diff --git a/docs/griptape-framework/structures/src/agents_2.py b/docs/griptape-framework/structures/src/agents_2.py index 84d431c1e..9ce4aec22 100644 --- a/docs/griptape-framework/structures/src/agents_2.py +++ b/docs/griptape-framework/structures/src/agents_2.py @@ -1,11 +1,7 @@ from griptape.structures import Agent -from griptape.tasks import PromptTask +from griptape.tools import CalculatorTool -agent = Agent() -agent.add_task( - PromptTask( - "Write me a {{ creative_medium }} about {{ args[0] }} and {{ args[1] }}", context={"creative_medium": "haiku"} - ) -) +agent = Agent(input="Calculate the following: {{ args[0] }}", tools=[CalculatorTool()]) -agent.run("Skateboards", "Programming") +agent.run("what's 13^7?") +print("Answer:", agent.output) diff --git a/docs/griptape-framework/structures/src/tasks_4.py b/docs/griptape-framework/structures/src/tasks_4.py index cd73b3ada..adc495597 100644 --- a/docs/griptape-framework/structures/src/tasks_4.py +++ b/docs/griptape-framework/structures/src/tasks_4.py @@ -1,10 +1,10 @@ from griptape.structures import Agent -from griptape.tasks import ToolkitTask +from griptape.tasks import PromptTask from griptape.tools import FileManagerTool, PromptSummaryTool, WebScraperTool agent = Agent() agent.add_task( - ToolkitTask( + PromptTask( "Load https://www.griptape.ai, summarize it, 
and store it in a file called griptape.txt", tools=[WebScraperTool(off_prompt=True), FileManagerTool(off_prompt=True), PromptSummaryTool(off_prompt=True)], ), diff --git a/docs/griptape-framework/structures/task-memory.md b/docs/griptape-framework/structures/task-memory.md index 691dba6e4..673e67dd8 100644 --- a/docs/griptape-framework/structures/task-memory.md +++ b/docs/griptape-framework/structures/task-memory.md @@ -26,14 +26,14 @@ Lets look at a simple example where `off_prompt` is set to `False`: ``` ``` -[04/26/24 13:06:42] INFO ToolkitTask 36b9dea13b9d479fb752014f41dca54c +[04/26/24 13:06:42] INFO PromptTask 36b9dea13b9d479fb752014f41dca54c Input: What is the square root of 12345? [04/26/24 13:06:48] INFO Subtask a88c0feeaef6493796a9148ed68c9caf Thought: To find the square root of 12345, I can use the CalculatorTool action with the expression "12345 ** 0.5". Actions: [{"name": "CalculatorTool", "path": "calculate", "input": {"values": {"expression": "12345 ** 0.5"}}, "tag": "sqrt_12345"}] INFO Subtask a88c0feeaef6493796a9148ed68c9caf Response: 111.1080555135405 -[04/26/24 13:06:49] INFO ToolkitTask 36b9dea13b9d479fb752014f41dca54c +[04/26/24 13:06:49] INFO PromptTask 36b9dea13b9d479fb752014f41dca54c Output: The square root of 12345 is approximately 111.108. ``` @@ -46,7 +46,7 @@ Let's explore what happens when `off_prompt` is set to `True`: ``` ``` -[04/26/24 13:07:02] INFO ToolkitTask ecbb788d9830491ab72a8a2bbef5fb0a +[04/26/24 13:07:02] INFO PromptTask ecbb788d9830491ab72a8a2bbef5fb0a Input: What is the square root of 12345? [04/26/24 13:07:10] INFO Subtask 4700dc0c2e934d1a9af60a28bd770bc6 Thought: To find the square root of a number, we can use the CalculatorTool action with the expression "sqrt(12345)". 
However, the CalculatorTool @@ -85,7 +85,7 @@ If we had kept it as `True`, the results would have been stored back Task Memory ``` ``` -[08/12/24 14:54:04] INFO ToolkitTask f7ebd8acc3d64e3ca9db82ef9ec4e65f +[08/12/24 14:54:04] INFO PromptTask f7ebd8acc3d64e3ca9db82ef9ec4e65f Input: What is the square root of 12345? [08/12/24 14:54:05] INFO Subtask 777693d039e74ed288f663742fdde2ea Actions: [ @@ -121,7 +121,7 @@ If we had kept it as `True`, the results would have been stored back Task Memory ] [08/12/24 14:54:07] INFO Subtask c8394ca51f1f4ae1b715618a2c5c8120 Response: The text contains a single numerical value: 111.1080555135405. -[08/12/24 14:54:08] INFO ToolkitTask f7ebd8acc3d64e3ca9db82ef9ec4e65f +[08/12/24 14:54:08] INFO PromptTask f7ebd8acc3d64e3ca9db82ef9ec4e65f Output: The square root of 12345 is approximately 111.108. ``` @@ -139,7 +139,7 @@ Let's say we want to query the contents of a very large webpage. When running this example, we get the following error: ``` -[04/26/24 13:20:02] ERROR ToolkitTask 67e2f907f95d4850ae79f9da67df54c1 +[04/26/24 13:20:02] ERROR PromptTask 67e2f907f95d4850ae79f9da67df54c1 Error code: 400 - {'error': {'message': "This model's maximum context length is 8192 tokens. However, your messages resulted in 73874 tokens. Please reduce the length of the messages.", 'type': 'invalid_request_error', 'param': 'messages', 'code': 'context_length_exceeded'}} ``` @@ -154,7 +154,7 @@ Note that we're setting `off_prompt` to `False` on the `QueryTool` so that the _ And now we get the expected output: ``` -[08/12/24 14:56:18] INFO ToolkitTask d3ce58587dc944b0a30a205631b82944 +[08/12/24 14:56:18] INFO PromptTask d3ce58587dc944b0a30a205631b82944 Input: According to this page https://en.wikipedia.org/wiki/Elden_Ring, how many copies of Elden Ring have been sold? 
[08/12/24 14:56:20] INFO Subtask 494850ec40fe474c83d48b5620c5dcbb Actions: [ @@ -192,7 +192,7 @@ And now we get the expected output: [08/12/24 14:56:29] INFO Subtask 8669ee523bb64550850566011bcd14e2 Response: "Elden Ring" sold 13.4 million copies worldwide by the end of March 2022 and 25 million by June 2024. The downloadable content (DLC) "Shadow of the Erdtree" sold five million copies within three days of its release. -[08/12/24 14:56:30] INFO ToolkitTask d3ce58587dc944b0a30a205631b82944 +[08/12/24 14:56:30] INFO PromptTask d3ce58587dc944b0a30a205631b82944 Output: Elden Ring sold 13.4 million copies worldwide by the end of March 2022 and 25 million by June 2024. ``` @@ -208,7 +208,7 @@ In this example, GPT-4 _never_ sees the contents of the page, only that it was s ``` ``` -[08/12/24 14:55:21] INFO ToolkitTask 329b1abc760e4d30bbf23e349451d930 +[08/12/24 14:55:21] INFO PromptTask 329b1abc760e4d30bbf23e349451d930 Input: Use this page https://en.wikipedia.org/wiki/Elden_Ring to find how many copies of Elden Ring have been sold, and then save the result to a file. [08/12/24 14:55:23] INFO Subtask 26205b5623174424b618abafd886c4d8 @@ -265,7 +265,7 @@ In this example, GPT-4 _never_ sees the contents of the page, only that it was s ] INFO Subtask 7aafcb3fb0d845858e2fcf9b8dc8a7ec Response: Successfully saved memory artifacts to disk -[08/12/24 14:55:40] INFO ToolkitTask 329b1abc760e4d30bbf23e349451d930 +[08/12/24 14:55:40] INFO PromptTask 329b1abc760e4d30bbf23e349451d930 Output: Successfully saved the number of copies sold of Elden Ring to a file named "elden_ring_sales.txt" in the "results" directory. ``` diff --git a/docs/griptape-framework/structures/tasks.md b/docs/griptape-framework/structures/tasks.md index 8504d5419..5cdced709 100644 --- a/docs/griptape-framework/structures/tasks.md +++ b/docs/griptape-framework/structures/tasks.md @@ -78,7 +78,7 @@ These hooks can be used to perform actions before and after the Task is run. 
For ## Prompt Task -For general purpose prompting, use the [PromptTask](../../reference/griptape/tasks/prompt_task.md): +For general-purpose interaction with LLMs, use the [PromptTask](../../reference/griptape/tasks/prompt_task.md): ```python --8<-- "docs/griptape-framework/structures/src/tasks_2.py" @@ -94,35 +94,16 @@ For general purpose prompting, use the [PromptTask](../../reference/griptape/tas Day begins anew. ``` -If the model supports it, you can also pass image inputs: - -```python ---8<-- "docs/griptape-framework/structures/src/tasks_3.py" -``` +### Tools -``` -[06/21/24 10:01:08] INFO PromptTask c229d1792da34ab1a7c45768270aada9 - Input: What's in this image? - - Media, type: image/jpeg, size: 82351 bytes -[06/21/24 10:01:12] INFO PromptTask c229d1792da34ab1a7c45768270aada9 - Output: The image depicts a stunning mountain landscape at sunrise or sunset. The sun is partially visible on the left side of the image, - casting a warm golden light over the scene. The mountains are covered with snow at their peaks, and a layer of clouds or fog is settled in the - valleys between them. The sky is a mix of warm colors near the horizon, transitioning to cooler blues higher up, with some scattered clouds - adding texture to the sky. The overall scene is serene and majestic, highlighting the natural beauty of the mountainous terrain. -``` - -## Toolkit Task - -To use [Griptape Tools](../../griptape-framework/tools/index.md), use a [Toolkit Task](../../reference/griptape/tasks/toolkit_task.md). -This Task takes in one or more Tools which the LLM will decide to use through Chain of Thought (CoT) reasoning. Because this Task uses CoT, it is recommended to only use with very capable models. +You can pass in one or more Tools which the LLM will decide to use through Chain of Thought (CoT) reasoning. Because tool execution uses CoT, it is recommended to only use with very capable models. 
```python --8<-- "docs/griptape-framework/structures/src/tasks_4.py" ``` ``` -[08/12/24 15:16:30] INFO ToolkitTask f5b44fe1dadc4e6688053df71d97e0de +[08/12/24 15:16:30] INFO PromptTask f5b44fe1dadc4e6688053df71d97e0de Input: Load https://www.griptape.ai, summarize it, and store it in a file called griptape.txt [08/12/24 15:16:32] INFO Subtask a4483eddfbe84129b0f4c04ef0f5d695 Actions: [ @@ -177,10 +158,30 @@ This Task takes in one or more Tools which the LLM will decide to use through Ch ] INFO Subtask d9b2dd9f96d841f49f5d460e33905183 Response: Successfully saved memory artifacts to disk -[08/12/24 15:16:39] INFO ToolkitTask f5b44fe1dadc4e6688053df71d97e0de +[08/12/24 15:16:39] INFO PromptTask f5b44fe1dadc4e6688053df71d97e0de Output: The content from https://www.griptape.ai has been summarized and stored in a file called `griptape.txt`. ``` +### Images + +If the model supports it, you can also pass image inputs: + +```python +--8<-- "docs/griptape-framework/structures/src/tasks_3.py" +``` + +``` +[06/21/24 10:01:08] INFO PromptTask c229d1792da34ab1a7c45768270aada9 + Input: What's in this image? + + Media, type: image/jpeg, size: 82351 bytes +[06/21/24 10:01:12] INFO PromptTask c229d1792da34ab1a7c45768270aada9 + Output: The image depicts a stunning mountain landscape at sunrise or sunset. The sun is partially visible on the left side of the image, + casting a warm golden light over the scene. The mountains are covered with snow at their peaks, and a layer of clouds or fog is settled in the + valleys between them. The sky is a mix of warm colors near the horizon, transitioning to cooler blues higher up, with some scattered clouds + adding texture to the sky. The overall scene is serene and majestic, highlighting the natural beauty of the mountainous terrain. +``` + ## Tool Task Another way to use [Griptape Tools](../../griptape-framework/tools/index.md), is with a [Tool Task](../../reference/griptape/tasks/tool_task.md). 
diff --git a/docs/griptape-framework/tools/index.md b/docs/griptape-framework/tools/index.md index 4f7d06408..f75aaa929 100644 --- a/docs/griptape-framework/tools/index.md +++ b/docs/griptape-framework/tools/index.md @@ -20,7 +20,7 @@ Here is an example of a Pipeline using Tools: ``` ``` -[08/12/24 15:18:19] INFO ToolkitTask 48ac0486e5374e1ea53e8d2b955e511f +[08/12/24 15:18:19] INFO PromptTask 48ac0486e5374e1ea53e8d2b955e511f Input: Load https://www.griptape.ai, summarize it, and store it in griptape.txt [08/12/24 15:18:20] INFO Subtask 3b8365c077ae4a7e94087bfeff7a858c Actions: [ @@ -82,7 +82,7 @@ Here is an example of a Pipeline using Tools: ] INFO Subtask d0f22504f576401f8d7e8ea78270a376 Response: Successfully saved file -[08/12/24 15:18:28] INFO ToolkitTask 48ac0486e5374e1ea53e8d2b955e511f +[08/12/24 15:18:28] INFO PromptTask 48ac0486e5374e1ea53e8d2b955e511f Output: The content from https://www.griptape.ai has been summarized and stored in griptape.txt. INFO PromptTask 4a9c59b1c06d4c549373d243a12f1285 Input: Say the following in spanish: The content from https://www.griptape.ai has been summarized and stored in griptape.txt. 
diff --git a/docs/griptape-framework/tools/src/index_1.py b/docs/griptape-framework/tools/src/index_1.py index a894e2037..9bc228e85 100644 --- a/docs/griptape-framework/tools/src/index_1.py +++ b/docs/griptape-framework/tools/src/index_1.py @@ -1,11 +1,11 @@ from griptape.structures import Pipeline -from griptape.tasks import ToolkitTask +from griptape.tasks import PromptTask from griptape.tools import FileManagerTool, PromptSummaryTool, WebScraperTool pipeline = Pipeline() pipeline.add_tasks( - ToolkitTask( + PromptTask( "Load https://www.griptape.ai, summarize it, and store it in a file called griptape.txt", tools=[ WebScraperTool(off_prompt=True), diff --git a/docs/griptape-tools/official-tools/computer-tool.md b/docs/griptape-tools/official-tools/computer-tool.md index 67d0f260c..07c92838e 100644 --- a/docs/griptape-tools/official-tools/computer-tool.md +++ b/docs/griptape-tools/official-tools/computer-tool.md @@ -10,7 +10,7 @@ You can specify a local working directory and environment variables during tool ``` ❮ poetry run python src/docs/task-memory.py -[08/12/24 15:13:56] INFO ToolkitTask 203ee958d1934811afe0bb86fb246e86 +[08/12/24 15:13:56] INFO PromptTask 203ee958d1934811afe0bb86fb246e86 Input: Make 2 files and then list the files in the current directory [08/12/24 15:13:58] INFO Subtask eb4e843b6f37498f9f0e85ada68114ac Actions: [ @@ -43,6 +43,6 @@ You can specify a local working directory and environment variables during tool INFO Subtask 032770e7697d44f6a0c8559bfea60420 Response: file1.txt file2.txt -[08/12/24 15:14:00] INFO ToolkitTask 203ee958d1934811afe0bb86fb246e86 +[08/12/24 15:14:00] INFO PromptTask 203ee958d1934811afe0bb86fb246e86 Output: file1.txt, file2.txt ``` diff --git a/docs/griptape-tools/official-tools/extraction-tool.md b/docs/griptape-tools/official-tools/extraction-tool.md index 157a7161d..46861be36 100644 --- a/docs/griptape-tools/official-tools/extraction-tool.md +++ b/docs/griptape-tools/official-tools/extraction-tool.md @@ -5,7 +5,7 
@@ The [ExractionTool](../../reference/griptape/tools/extraction/tool.md) enables L ``` ``` -[08/12/24 15:58:03] INFO ToolkitTask 43b3d209a83c470d8371b7ef4af175b4 +[08/12/24 15:58:03] INFO PromptTask 43b3d209a83c470d8371b7ef4af175b4 Input: Load https://griptape.ai and extract key info [08/12/24 15:58:05] INFO Subtask 6a9a63802faf4717bab24bbbea2cb49b Actions: [ @@ -49,6 +49,6 @@ The [ExractionTool](../../reference/griptape/tools/extraction/tool.md) enables L extract, prep/transform, and load into a vector database index.", "Retrieval as a Service (RAG): Generate answers, summaries, and details from your own data with ready-made or custom retrieval patterns.", "Structure Runtime (RUN): Build AI agents, pipelines, and workflows for real-time interfaces, transactional processes, and batch workloads."]} -[08/12/24 15:58:14] INFO ToolkitTask 43b3d209a83c470d8371b7ef4af175b4 +[08/12/24 15:58:14] INFO PromptTask 43b3d209a83c470d8371b7ef4af175b4 Output: Extracted key information from Griptape's website. ``` diff --git a/docs/griptape-tools/official-tools/prompt-summary-tool.md b/docs/griptape-tools/official-tools/prompt-summary-tool.md index 23c35c367..3b90807dc 100644 --- a/docs/griptape-tools/official-tools/prompt-summary-tool.md +++ b/docs/griptape-tools/official-tools/prompt-summary-tool.md @@ -5,7 +5,7 @@ The [PromptSummaryTool](../../reference/griptape/tools/prompt_summary/tool.md) e ``` ```` -[08/12/24 15:54:46] INFO ToolkitTask 8be73eb542c44418ba880399044c017a +[08/12/24 15:54:46] INFO PromptTask 8be73eb542c44418ba880399044c017a Input: How can I build Neovim from source for MacOS according to this https://github.com/neovim/neovim/blob/master/BUILD.md [08/12/24 15:54:47] INFO Subtask cd362a149e1d400997be93c1342d1663 Actions: [ @@ -67,7 +67,7 @@ The [PromptSummaryTool](../../reference/griptape/tools/prompt_summary/tool.md) e - Commands for checking and updating translations. 
The text also includes troubleshooting tips and links to additional resources for further guidance. -[08/12/24 15:55:01] INFO ToolkitTask 8be73eb542c44418ba880399044c017a +[08/12/24 15:55:01] INFO PromptTask 8be73eb542c44418ba880399044c017a Output: To build Neovim from source on macOS, follow these steps: 1. **Install Dependencies**: diff --git a/docs/griptape-tools/official-tools/query-tool.md b/docs/griptape-tools/official-tools/query-tool.md index 8b1b0a50e..643181df2 100644 --- a/docs/griptape-tools/official-tools/query-tool.md +++ b/docs/griptape-tools/official-tools/query-tool.md @@ -5,7 +5,7 @@ The [QueryTool](../../reference/griptape/tools/query/tool.md) enables Agents to ``` ``` -[08/12/24 15:49:23] INFO ToolkitTask a88abda2e5324bdf81a3e2b99c26b9df +[08/12/24 15:49:23] INFO PromptTask a88abda2e5324bdf81a3e2b99c26b9df Input: Tell me about the architecture as described here: https://neovim.io/doc/user/vim_diff.html [08/12/24 15:49:24] INFO Subtask 3dc9910bcac44c718b3aedd6222e372a Actions: [ @@ -63,7 +63,7 @@ The [QueryTool](../../reference/griptape/tools/query/tool.md) enables Agents to plugins/extensions. These architectural decisions make Nvim more stable, extensible, and user-friendly compared to traditional Vim. -[08/12/24 15:49:37] INFO ToolkitTask a88abda2e5324bdf81a3e2b99c26b9df +[08/12/24 15:49:37] INFO PromptTask a88abda2e5324bdf81a3e2b99c26b9df Output: The architecture of Neovim (Nvim) is designed to enhance stability, performance, and extensibility. Here are the key points: 1. **Decoupled UI**: The user interface (UI) is separated from the core editor. 
All UIs, including the built-in terminal user interface (TUI), diff --git a/docs/griptape-tools/official-tools/rag-tool.md b/docs/griptape-tools/official-tools/rag-tool.md index f96f9c7fe..110bd6629 100644 --- a/docs/griptape-tools/official-tools/rag-tool.md +++ b/docs/griptape-tools/official-tools/rag-tool.md @@ -9,7 +9,7 @@ Here is an example of how it can be used with a local vector store driver: ``` ``` -[07/11/24 13:30:43] INFO ToolkitTask a6d057d5c71d4e9cb6863a2adb64b76c +[07/11/24 13:30:43] INFO PromptTask a6d057d5c71d4e9cb6863a2adb64b76c Input: what is Griptape? [07/11/24 13:30:44] INFO Subtask 8fd89ed9eefe49b8892187f2fca3890a Actions: [ @@ -27,7 +27,7 @@ Here is an example of how it can be used with a local vector store driver: [07/11/24 13:30:49] INFO Subtask 8fd89ed9eefe49b8892187f2fca3890a Response: Griptape builds AI-powered applications that connect securely to your enterprise data and APIs. Griptape Agents provide incredible power and flexibility when working with large language models. - INFO ToolkitTask a6d057d5c71d4e9cb6863a2adb64b76c + INFO PromptTask a6d057d5c71d4e9cb6863a2adb64b76c Output: Griptape builds AI-powered applications that connect securely to your enterprise data and APIs. Griptape Agents provide incredible power and flexibility when working with large language models. 
``` diff --git a/docs/griptape-tools/official-tools/sql-tool.md b/docs/griptape-tools/official-tools/sql-tool.md index a0b023038..1e1adcd63 100644 --- a/docs/griptape-tools/official-tools/sql-tool.md +++ b/docs/griptape-tools/official-tools/sql-tool.md @@ -7,7 +7,7 @@ This tool enables LLMs to execute SQL statements via [SQLAlchemy](https://www.sq ``` ``` -[08/12/24 14:59:31] INFO ToolkitTask e302f7315d1a4f939e0125103ff4f09f +[08/12/24 14:59:31] INFO PromptTask e302f7315d1a4f939e0125103ff4f09f Input: SELECT * FROM people; [08/12/24 14:59:34] INFO Subtask 809d1a281b85447f90706d431b77b845 Actions: [ @@ -42,7 +42,7 @@ This tool enables LLMs to execute SQL statements via [SQLAlchemy](https://www.sq 9,David,Macdonald,Public relations account executive 10,Erica,Ramos,"Accountant, chartered public finance" -[08/12/24 14:59:43] INFO ToolkitTask e302f7315d1a4f939e0125103ff4f09f +[08/12/24 14:59:43] INFO PromptTask e302f7315d1a4f939e0125103ff4f09f Output: 1. Lee Andrews - Engineer, electrical 2. Michael Woods - Therapist, art diff --git a/docs/griptape-tools/official-tools/src/rest_api_tool_1.py b/docs/griptape-tools/official-tools/src/rest_api_tool_1.py index 3f6b3b663..416ce0062 100644 --- a/docs/griptape-tools/official-tools/src/rest_api_tool_1.py +++ b/docs/griptape-tools/official-tools/src/rest_api_tool_1.py @@ -5,7 +5,7 @@ from griptape.drivers import OpenAiChatPromptDriver from griptape.memory.structure import ConversationMemory from griptape.structures import Pipeline -from griptape.tasks import ToolkitTask +from griptape.tasks import PromptTask from griptape.tools import RestApiTool Defaults.drivers_config = DriversConfig( @@ -116,27 +116,27 @@ ) pipeline.add_tasks( - ToolkitTask( + PromptTask( "Output the title of post 1.", tools=[posts_client], ), - ToolkitTask( + PromptTask( "Create a post for user 1 with title 'My First Post' and body 'Hello world!'.", tools=[posts_client], ), - ToolkitTask( + PromptTask( "Update post 1 with a new body: 'Hello universe!'.", 
tools=[posts_client], ), - ToolkitTask( + PromptTask( "Patch post 1 with a new title: 'My First Post, A Journey'.", tools=[posts_client], ), - ToolkitTask( + PromptTask( "Delete post 1.", tools=[posts_client], ), - ToolkitTask( + PromptTask( "Output the body of all the comments for post 1.", tools=[posts_client], ), diff --git a/docs/griptape-tools/official-tools/structure-run-tool.md b/docs/griptape-tools/official-tools/structure-run-tool.md index 37366b586..6748da2fe 100644 --- a/docs/griptape-tools/official-tools/structure-run-tool.md +++ b/docs/griptape-tools/official-tools/structure-run-tool.md @@ -8,7 +8,7 @@ It requires you to provide a [Structure Run Driver](../../griptape-framework/dri ``` ``` -[05/02/24 13:50:03] INFO ToolkitTask 4e9458375bda4fbcadb77a94624ed64c +[05/02/24 13:50:03] INFO PromptTask 4e9458375bda4fbcadb77a94624ed64c Input: what is modular RAG? [05/02/24 13:50:10] INFO Subtask 5ef2d72028fc495aa7faf6f46825b004 Thought: To answer this question, I need to run a search for the term "modular RAG". I will use the StructureRunTool action to execute a @@ -32,7 +32,7 @@ It requires you to provide a [Structure Run Driver](../../griptape-framework/dri processing capabilities. The Modular RAG framework allows for module substitution or reconfiguration to address specific challenges, expanding flexibility by integrating new modules or adjusting interaction flow among existing ones. This approach supports both sequential processing and integrated end-to-end training across its components, illustrating progression and refinement within the RAG family.'} -[05/02/24 13:50:44] INFO ToolkitTask 4e9458375bda4fbcadb77a94624ed64c +[05/02/24 13:50:44] INFO PromptTask 4e9458375bda4fbcadb77a94624ed64c Output: Modular Retrieval-Augmented Generation (RAG) is an advanced approach that goes beyond the traditional RAG paradigms, offering enhanced adaptability and versatility. 
It involves incorporating diverse strategies to improve its components by adding specialized modules for retrieval and processing capabilities. The Modular RAG framework allows for module substitution or reconfiguration to address specific diff --git a/docs/griptape-tools/official-tools/web-scraper-tool.md b/docs/griptape-tools/official-tools/web-scraper-tool.md index 7001494ff..33fcc21a2 100644 --- a/docs/griptape-tools/official-tools/web-scraper-tool.md +++ b/docs/griptape-tools/official-tools/web-scraper-tool.md @@ -7,7 +7,7 @@ This tool enables LLMs to scrape web pages for full text, summaries, authors, ti ``` ``` -[08/12/24 15:32:08] INFO ToolkitTask b14a4305365f4b17a4dcf235f84397e2 +[08/12/24 15:32:08] INFO PromptTask b14a4305365f4b17a4dcf235f84397e2 Input: Based on https://www.griptape.ai/, tell me what griptape is [08/12/24 15:32:10] INFO Subtask bf396977ea634eb28f55388d3f828f5d Actions: [ @@ -65,7 +65,7 @@ This tool enables LLMs to scrape web pages for full text, summaries, authors, ti - Automated Data Prep (ETL): Connect, extract, transform, and load data into a vector database index. - Retrieval as a Service (RAG): Generate answers, summaries, and details from your data using customizable retrieval patterns. - Structure Runtime (RUN): Build and integrate AI agents, pipelines, and workflows into client applications. -[08/12/24 15:32:21] INFO ToolkitTask b14a4305365f4b17a4dcf235f84397e2 +[08/12/24 15:32:21] INFO PromptTask b14a4305365f4b17a4dcf235f84397e2 Output: Griptape is a comprehensive solution designed to facilitate the building, deploying, and scaling of AI applications in the cloud. It provides developers with a framework and cloud services that simplify the creation of retrieval-driven AI-powered applications, even for those without extensive AI or prompt engineering expertise. 
diff --git a/griptape/structures/agent.py b/griptape/structures/agent.py index 128c02faa..daf9665e5 100644 --- a/griptape/structures/agent.py +++ b/griptape/structures/agent.py @@ -8,7 +8,7 @@ from griptape.common import observable from griptape.configs import Defaults from griptape.structures import Structure -from griptape.tasks import PromptTask, ToolkitTask +from griptape.tasks import PromptTask if TYPE_CHECKING: from griptape.artifacts import BaseArtifact @@ -43,18 +43,12 @@ def __attrs_post_init__(self) -> None: self.prompt_driver.stream = self.stream if len(self.tasks) == 0: - if self.tools: - task = ToolkitTask( - self.input, - prompt_driver=self.prompt_driver, - tools=self.tools, - max_meta_memory_entries=self.max_meta_memory_entries, - ) - else: - task = PromptTask( - self.input, prompt_driver=self.prompt_driver, max_meta_memory_entries=self.max_meta_memory_entries - ) - + task = PromptTask( + self.input, + prompt_driver=self.prompt_driver, + tools=self.tools, + max_meta_memory_entries=self.max_meta_memory_entries, + ) self.add_task(task) @property diff --git a/griptape/tasks/__init__.py b/griptape/tasks/__init__.py index 6a9f95c69..fab6f88e9 100644 --- a/griptape/tasks/__init__.py +++ b/griptape/tasks/__init__.py @@ -1,7 +1,7 @@ from .base_task import BaseTask from .base_text_input_task import BaseTextInputTask -from .prompt_task import PromptTask from .actions_subtask import ActionsSubtask +from .prompt_task import PromptTask from .toolkit_task import ToolkitTask from .text_summary_task import TextSummaryTask from .tool_task import ToolTask diff --git a/griptape/tasks/prompt_task.py b/griptape/tasks/prompt_task.py index 7216536b8..4ed313bf0 100644 --- a/griptape/tasks/prompt_task.py +++ b/griptape/tasks/prompt_task.py @@ -1,29 +1,40 @@ from __future__ import annotations +import json import logging from typing import TYPE_CHECKING, Callable, Optional, Union -from attrs import NOTHING, Factory, NothingType, define, field +from attrs import NOTHING, 
Attribute, Factory, NothingType, define, field -from griptape.artifacts import BaseArtifact, ListArtifact, TextArtifact -from griptape.common import PromptStack +from griptape import utils +from griptape.artifacts import ActionArtifact, BaseArtifact, ErrorArtifact, ListArtifact, TextArtifact +from griptape.common import PromptStack, ToolAction from griptape.configs import Defaults from griptape.memory.structure import Run +from griptape.mixins.actions_subtask_origin_mixin import ActionsSubtaskOriginMixin from griptape.mixins.rule_mixin import RuleMixin from griptape.rules import Ruleset -from griptape.tasks import BaseTask +from griptape.tasks import ActionsSubtask, BaseTask from griptape.utils import J2 if TYPE_CHECKING: + from schema import Schema + from griptape.drivers import BasePromptDriver + from griptape.memory import TaskMemory from griptape.memory.structure.base_conversation_memory import BaseConversationMemory from griptape.structures import Structure + from griptape.tools import BaseTool logger = logging.getLogger(Defaults.logging_config.logger_name) @define -class PromptTask(RuleMixin, BaseTask): +class PromptTask(BaseTask, RuleMixin, ActionsSubtaskOriginMixin): + DEFAULT_MAX_STEPS = 20 + # Stop sequence for chain-of-thought in the framework. Using this "token-like" string to make it more unique, + # so that it doesn't trigger on accident. 
+ RESPONSE_STOP_SEQUENCE = "<|Response|>" prompt_driver: BasePromptDriver = field( default=Factory(lambda: Defaults.drivers_config.prompt_driver), kw_only=True, metadata={"serializable": True} ) @@ -38,6 +49,19 @@ class PromptTask(RuleMixin, BaseTask): default=lambda task: task.full_context["args"][0] if task.full_context["args"] else TextArtifact(value=""), alias="input", ) + tools: list[BaseTool] = field(factory=list, kw_only=True, metadata={"serializable": True}) + max_subtasks: int = field(default=DEFAULT_MAX_STEPS, kw_only=True, metadata={"serializable": True}) + task_memory: Optional[TaskMemory] = field(default=None, kw_only=True) + subtasks: list[ActionsSubtask] = field(factory=list) + generate_assistant_subtask_template: Callable[[ActionsSubtask], str] = field( + default=Factory(lambda self: self.default_generate_assistant_subtask_template, takes_self=True), + kw_only=True, + ) + generate_user_subtask_template: Callable[[ActionsSubtask], str] = field( + default=Factory(lambda self: self.default_generate_user_subtask_template, takes_self=True), + kw_only=True, + ) + response_stop_sequence: str = field(default=RESPONSE_STOP_SEQUENCE, kw_only=True) @property def rulesets(self) -> list: @@ -63,12 +87,10 @@ def input(self) -> BaseArtifact: def input(self, value: str | list | tuple | BaseArtifact | Callable[[BaseTask], BaseArtifact]) -> None: self._input = value - output: Optional[BaseArtifact] = field(default=None, init=False) - @property def prompt_stack(self) -> PromptStack: - stack = PromptStack() - memory = self.conversation_memory + stack = PromptStack(tools=self.tools) + memory = self.structure.conversation_memory if self.structure is not None else None system_template = self.generate_system_template(self) if system_template: @@ -77,18 +99,75 @@ def prompt_stack(self) -> PromptStack: stack.add_user_message(self.input) if self.output: - stack.add_assistant_message(self.output) - - if memory is not None and memory is not NOTHING: - # insert memory into the 
stack right before the user messages + stack.add_assistant_message(self.output.to_text()) + else: + for s in self.subtasks: + if self.prompt_driver.use_native_tools: + action_calls = [ + ToolAction(name=action.name, path=action.path, tag=action.tag, input=action.input) + for action in s.actions + ] + action_results = [ + ToolAction( + name=action.name, + path=action.path, + tag=action.tag, + output=action.output if action.output is not None else s.output, + ) + for action in s.actions + ] + + stack.add_assistant_message( + ListArtifact( + [ + *([TextArtifact(s.thought)] if s.thought else []), + *[ActionArtifact(a) for a in action_calls], + ], + ), + ) + stack.add_user_message( + ListArtifact( + [ + *[ActionArtifact(a) for a in action_results], + *([] if s.output else [TextArtifact("Please keep going")]), + ], + ), + ) + else: + stack.add_assistant_message(self.generate_assistant_subtask_template(s)) + stack.add_user_message(self.generate_user_subtask_template(s)) + + if memory is not None: + # inserting at index 1 to place memory right after system prompt memory.add_to_prompt_stack(self.prompt_driver, stack, 1 if system_template else 0) return stack - def default_generate_system_template(self, _: PromptTask) -> str: - return J2("tasks/prompt_task/system.j2").render( - rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.rulesets), - ) + @property + def tool_output_memory(self) -> list[TaskMemory]: + unique_memory_dict = {} + + for memories in [tool.output_memory for tool in self.tools if tool.output_memory]: + for memory_list in memories.values(): + for memory in memory_list: + if memory.name not in unique_memory_dict: + unique_memory_dict[memory.name] = memory + + return list(unique_memory_dict.values()) + + @tools.validator # pyright: ignore[reportAttributeAccessIssue] + def validate_tools(self, _: Attribute, tools: list[BaseTool]) -> None: + tool_names = [t.name for t in tools] + + if len(tool_names) > len(set(tool_names)): + raise ValueError("tools names 
have to be unique in task") + + def __attrs_post_init__(self) -> None: + super().__attrs_post_init__() + if self.task_memory: + self.set_default_tools_memory(self.task_memory) + + output: Optional[BaseArtifact] = field(default=None, init=False) def before_run(self) -> None: super().before_run() @@ -116,9 +195,31 @@ def after_run(self) -> None: conversation_memory.add_run(run) def try_run(self) -> BaseArtifact: - message = self.prompt_driver.run(self.prompt_stack) + from griptape.tasks import ActionsSubtask + + self.subtasks.clear() - return message.to_artifact() + if self.response_stop_sequence not in self.prompt_driver.tokenizer.stop_sequences: + self.prompt_driver.tokenizer.stop_sequences.extend([self.response_stop_sequence]) + + result = self.prompt_driver.run(self.prompt_stack) + subtask = self.add_subtask(ActionsSubtask(result.to_artifact())) + + while True: + if subtask.output is None: + if len(self.subtasks) >= self.max_subtasks: + subtask.output = ErrorArtifact(f"Exceeded tool limit of {self.max_subtasks} subtasks per task") + else: + subtask.run() + + result = self.prompt_driver.run(self.prompt_stack) + subtask = self.add_subtask(ActionsSubtask(result.to_artifact())) + else: + break + + self.output = subtask.output + + return self.output def preprocess(self, structure: Structure) -> BaseTask: super().preprocess(structure) @@ -129,8 +230,79 @@ def preprocess(self, structure: Structure) -> BaseTask: else: self.conversation_memory = None + if self.task_memory is None and structure.task_memory: + self.set_default_tools_memory(structure.task_memory) + return self + def default_generate_system_template(self, _: PromptTask) -> str: + schema = self.actions_schema().json_schema("Actions Schema") + schema["minItems"] = 1 # The `schema` library doesn't support `minItems` so we must add it manually. 
+ + return J2("tasks/prompt_task/system.j2").render( + rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.rulesets), + action_names=str.join(", ", [tool.name for tool in self.tools]), + actions_schema=utils.minify_json(json.dumps(schema)), + meta_memory=J2("memory/meta/meta_memory.j2").render(meta_memories=self.meta_memories), + use_native_tools=self.prompt_driver.use_native_tools, + stop_sequence=self.response_stop_sequence, + ) + + def default_generate_assistant_subtask_template(self, subtask: ActionsSubtask) -> str: + return J2("tasks/prompt_task/assistant_subtask.j2").render( + stop_sequence=self.response_stop_sequence, + subtask=subtask, + ) + + def default_generate_user_subtask_template(self, subtask: ActionsSubtask) -> str: + return J2("tasks/prompt_task/user_subtask.j2").render( + stop_sequence=self.response_stop_sequence, + subtask=subtask, + ) + + def actions_schema(self) -> Schema: + return self._actions_schema_for_tools(self.tools) + + def set_default_tools_memory(self, memory: TaskMemory) -> None: + self.task_memory = memory + + for tool in self.tools: + if self.task_memory: + if tool.input_memory is None: + tool.input_memory = [self.task_memory] + if tool.output_memory is None and tool.off_prompt: + tool.output_memory = {getattr(a, "name"): [self.task_memory] for a in tool.activities()} + + def find_subtask(self, subtask_id: str) -> ActionsSubtask: + for subtask in self.subtasks: + if subtask.id == subtask_id: + return subtask + raise ValueError(f"Subtask with id {subtask_id} not found.") + + def add_subtask(self, subtask: ActionsSubtask) -> ActionsSubtask: + subtask.attach_to(self) + subtask.structure = self.structure + + if len(self.subtasks) > 0: + self.subtasks[-1].add_child(subtask) + subtask.add_parent(self.subtasks[-1]) + + self.subtasks.append(subtask) + + return subtask + + def find_tool(self, tool_name: str) -> BaseTool: + for tool in self.tools: + if tool.name == tool_name: + return tool + raise ValueError(f"Tool with name {tool_name} 
not found.") + + def find_memory(self, memory_name: str) -> TaskMemory: + for memory in self.tool_output_memory: + if memory.name == memory_name: + return memory + raise ValueError(f"Memory with name {memory_name} not found.") + def _process_task_input( self, task_input: str | tuple | list | BaseArtifact | Callable[[BaseTask], BaseArtifact], diff --git a/griptape/tasks/tool_task.py b/griptape/tasks/tool_task.py index a0b1caf9d..200a230c6 100644 --- a/griptape/tasks/tool_task.py +++ b/griptape/tasks/tool_task.py @@ -23,11 +23,14 @@ @define class ToolTask(PromptTask, ActionsSubtaskOriginMixin): + DEFAULT_MAX_STEPS = 0 ACTION_PATTERN = r"(?s)[^{]*({.*})" tool: BaseTool = field(kw_only=True, metadata={"serializable": True}) subtask: Optional[ActionsSubtask] = field(default=None, kw_only=True) task_memory: Optional[TaskMemory] = field(default=None, kw_only=True) + tools: list[BaseTool] = field(factory=list, kw_only=True, metadata={"serializable": False}) + max_subtasks: int = field(default=DEFAULT_MAX_STEPS, kw_only=True, metadata={"serializable": False}) @property def prompt_stack(self) -> PromptStack: diff --git a/griptape/tasks/toolkit_task.py b/griptape/tasks/toolkit_task.py index d9104ed68..6544a11a5 100644 --- a/griptape/tasks/toolkit_task.py +++ b/griptape/tasks/toolkit_task.py @@ -1,223 +1,23 @@ from __future__ import annotations -import json -from typing import TYPE_CHECKING, Callable, Optional +import warnings +from typing import TYPE_CHECKING -from attrs import Attribute, Factory, define, field +from attrs import define -from griptape import utils -from griptape.artifacts import ActionArtifact, BaseArtifact, ErrorArtifact, ListArtifact, TextArtifact -from griptape.common import PromptStack, ToolAction -from griptape.mixins.actions_subtask_origin_mixin import ActionsSubtaskOriginMixin -from griptape.tasks import ActionsSubtask, PromptTask -from griptape.utils import J2 +from griptape.tasks import PromptTask if TYPE_CHECKING: - from schema import Schema - - 
from griptape.memory import TaskMemory - from griptape.structures import Structure - from griptape.tools import BaseTool + from griptape.artifacts import BaseArtifact @define -class ToolkitTask(PromptTask, ActionsSubtaskOriginMixin): - DEFAULT_MAX_STEPS = 20 - # Stop sequence for chain-of-thought in the framework. Using this "token-like" string to make it more unique, - # so that it doesn't trigger on accident. - RESPONSE_STOP_SEQUENCE = "<|Response|>" - - tools: list[BaseTool] = field(factory=list, kw_only=True, metadata={"serializable": True}) - max_subtasks: int = field(default=DEFAULT_MAX_STEPS, kw_only=True, metadata={"serializable": True}) - task_memory: Optional[TaskMemory] = field(default=None, kw_only=True) - subtasks: list[ActionsSubtask] = field(factory=list) - generate_assistant_subtask_template: Callable[[ActionsSubtask], str] = field( - default=Factory(lambda self: self.default_generate_assistant_subtask_template, takes_self=True), - kw_only=True, - ) - generate_user_subtask_template: Callable[[ActionsSubtask], str] = field( - default=Factory(lambda self: self.default_generate_user_subtask_template, takes_self=True), - kw_only=True, - ) - response_stop_sequence: str = field(default=RESPONSE_STOP_SEQUENCE, kw_only=True) - - def __attrs_post_init__(self) -> None: - super().__attrs_post_init__() - if self.task_memory: - self.set_default_tools_memory(self.task_memory) - - @tools.validator # pyright: ignore[reportAttributeAccessIssue] - def validate_tools(self, _: Attribute, tools: list[BaseTool]) -> None: - tool_names = [t.name for t in tools] - - if len(tool_names) > len(set(tool_names)): - raise ValueError("tools names have to be unique in task") - - @property - def tool_output_memory(self) -> list[TaskMemory]: - unique_memory_dict = {} - - for memories in [tool.output_memory for tool in self.tools if tool.output_memory]: - for memory_list in memories.values(): - for memory in memory_list: - if memory.name not in unique_memory_dict: - 
unique_memory_dict[memory.name] = memory - - return list(unique_memory_dict.values()) - - @property - def prompt_stack(self) -> PromptStack: - stack = PromptStack(tools=self.tools) - memory = self.structure.conversation_memory if self.structure is not None else None - - stack.add_system_message(self.generate_system_template(self)) - - stack.add_user_message(self.input) - - if self.output: - stack.add_assistant_message(self.output.to_text()) - else: - for s in self.subtasks: - if self.prompt_driver.use_native_tools: - action_calls = [ - ToolAction(name=action.name, path=action.path, tag=action.tag, input=action.input) - for action in s.actions - ] - action_results = [ - ToolAction( - name=action.name, - path=action.path, - tag=action.tag, - output=action.output if action.output is not None else s.output, - ) - for action in s.actions - ] - - stack.add_assistant_message( - ListArtifact( - [ - *([TextArtifact(s.thought)] if s.thought else []), - *[ActionArtifact(a) for a in action_calls], - ], - ), - ) - stack.add_user_message( - ListArtifact( - [ - *[ActionArtifact(a) for a in action_results], - *([] if s.output else [TextArtifact("Please keep going")]), - ], - ), - ) - else: - stack.add_assistant_message(self.generate_assistant_subtask_template(s)) - stack.add_user_message(self.generate_user_subtask_template(s)) - - if memory is not None: - # inserting at index 1 to place memory right after system prompt - memory.add_to_prompt_stack(self.prompt_driver, stack, 1) - - return stack - - def preprocess(self, structure: Structure) -> ToolkitTask: - super().preprocess(structure) - - if self.task_memory is None and structure.task_memory: - self.set_default_tools_memory(structure.task_memory) - - return self - - def default_generate_system_template(self, _: PromptTask) -> str: - schema = self.actions_schema().json_schema("Actions Schema") - schema["minItems"] = 1 # The `schema` library doesn't support `minItems` so we must add it manually. 
- - return J2("tasks/toolkit_task/system.j2").render( - rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.rulesets), - action_names=str.join(", ", [tool.name for tool in self.tools]), - actions_schema=utils.minify_json(json.dumps(schema)), - meta_memory=J2("memory/meta/meta_memory.j2").render(meta_memories=self.meta_memories), - use_native_tools=self.prompt_driver.use_native_tools, - stop_sequence=self.response_stop_sequence, - ) - - def default_generate_assistant_subtask_template(self, subtask: ActionsSubtask) -> str: - return J2("tasks/toolkit_task/assistant_subtask.j2").render( - stop_sequence=self.response_stop_sequence, - subtask=subtask, - ) - - def default_generate_user_subtask_template(self, subtask: ActionsSubtask) -> str: - return J2("tasks/toolkit_task/user_subtask.j2").render( - stop_sequence=self.response_stop_sequence, - subtask=subtask, - ) - - def actions_schema(self) -> Schema: - return self._actions_schema_for_tools(self.tools) - - def set_default_tools_memory(self, memory: TaskMemory) -> None: - self.task_memory = memory - - for tool in self.tools: - if self.task_memory: - if tool.input_memory is None: - tool.input_memory = [self.task_memory] - if tool.output_memory is None and tool.off_prompt: - tool.output_memory = {getattr(a, "name"): [self.task_memory] for a in tool.activities()} - +class ToolkitTask(PromptTask): def try_run(self) -> BaseArtifact: - from griptape.tasks import ActionsSubtask - - self.subtasks.clear() - - if self.response_stop_sequence not in self.prompt_driver.tokenizer.stop_sequences: - self.prompt_driver.tokenizer.stop_sequences.extend([self.response_stop_sequence]) - - result = self.prompt_driver.run(self.prompt_stack) - subtask = self.add_subtask(ActionsSubtask(result.to_artifact())) - - while True: - if subtask.output is None: - if len(self.subtasks) >= self.max_subtasks: - subtask.output = ErrorArtifact(f"Exceeded tool limit of {self.max_subtasks} subtasks per task") - else: - subtask.run() - - result = 
self.prompt_driver.run(self.prompt_stack) - subtask = self.add_subtask(ActionsSubtask(result.to_artifact())) - else: - break - - self.output = subtask.output - - return self.output - - def find_subtask(self, subtask_id: str) -> ActionsSubtask: - for subtask in self.subtasks: - if subtask.id == subtask_id: - return subtask - raise ValueError(f"Subtask with id {subtask_id} not found.") - - def add_subtask(self, subtask: ActionsSubtask) -> ActionsSubtask: - subtask.attach_to(self) - subtask.structure = self.structure - - if len(self.subtasks) > 0: - self.subtasks[-1].add_child(subtask) - subtask.add_parent(self.subtasks[-1]) - - self.subtasks.append(subtask) - - return subtask - - def find_tool(self, tool_name: str) -> BaseTool: - for tool in self.tools: - if tool.name == tool_name: - return tool - raise ValueError(f"Tool with name {tool_name} not found.") + warnings.warn( + "`ToolkitTask` is deprecated and will be removed in a future release. `PromptTask` is a drop-in replacement.", + DeprecationWarning, + stacklevel=2, + ) - def find_memory(self, memory_name: str) -> TaskMemory: - for memory in self.tool_output_memory: - if memory.name == memory_name: - return memory - raise ValueError(f"Memory with name {memory_name} not found.") + return super().try_run() diff --git a/griptape/templates/tasks/prompt_task/assistant_subtask.j2 b/griptape/templates/tasks/prompt_task/assistant_subtask.j2 new file mode 100644 index 000000000..4a4038430 --- /dev/null +++ b/griptape/templates/tasks/prompt_task/assistant_subtask.j2 @@ -0,0 +1,4 @@ +{% if subtask.thought %} +Thought: {{ subtask.thought }} +{% endif %} +Actions: {{ subtask.actions_to_json() }} diff --git a/griptape/templates/tasks/prompt_task/system.j2 b/griptape/templates/tasks/prompt_task/system.j2 index d8311e582..b262e7c72 100644 --- a/griptape/templates/tasks/prompt_task/system.j2 +++ b/griptape/templates/tasks/prompt_task/system.j2 @@ -1,3 +1,28 @@ +{%if action_names %} +You can think step-by-step and execute actions 
sequentially or in parallel to get your final answer. +{% if not use_native_tools %} + +You must use the following format when executing actions: + +Thought: +Actions: +{{ stop_sequence }}: +"Thought", "Actions", "{{ stop_sequence }}" must always start on a new line. + +You must use the following format when providing your final answer: +Answer: +{% endif %} +Repeat executing actions as many times as you need. +If an action's output contains an error, you MUST ALWAYS try to fix the error by executing another action. + +Be truthful. ALWAYS be proactive and NEVER ask the user for more information input. Keep using actions until you have your final answer. +NEVER make up actions, action names, or action paths. NEVER make up facts. NEVER reference tags in other action input values. +{% endif %} +{% if meta_memory %} + +{{ meta_memory }} +{% endif %} {% if rulesets %} + {{ rulesets }} {% endif %} diff --git a/griptape/templates/tasks/prompt_task/user_subtask.j2 b/griptape/templates/tasks/prompt_task/user_subtask.j2 new file mode 100644 index 000000000..aa34cdd87 --- /dev/null +++ b/griptape/templates/tasks/prompt_task/user_subtask.j2 @@ -0,0 +1,5 @@ +{% if subtask.output %} +{{ stop_sequence }}: {{ subtask.output.to_text() }} +{% else %} +Please, keep going! 
+{% endif %} diff --git a/tests/integration/tasks/test_toolkit_task.py b/tests/integration/tasks/test_toolkit_task.py index 7593c5391..e43576500 100644 --- a/tests/integration/tasks/test_toolkit_task.py +++ b/tests/integration/tasks/test_toolkit_task.py @@ -3,7 +3,7 @@ from tests.utils.structure_tester import StructureTester -class TestToolkitTask: +class TestPromptTask: @pytest.fixture( autouse=True, params=StructureTester.TOOLKIT_TASK_CAPABLE_PROMPT_DRIVERS, diff --git a/tests/unit/drivers/prompt/test_base_prompt_driver.py b/tests/unit/drivers/prompt/test_base_prompt_driver.py index f9ad70573..58720bbc5 100644 --- a/tests/unit/drivers/prompt/test_base_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_base_prompt_driver.py @@ -3,7 +3,7 @@ from griptape.events import FinishPromptEvent, StartPromptEvent from griptape.events.event_bus import _EventBus from griptape.structures import Pipeline -from griptape.tasks import PromptTask, ToolkitTask +from griptape.tasks import PromptTask from tests.mocks.mock_failing_prompt_driver import MockFailingPromptDriver from tests.mocks.mock_prompt_driver import MockPromptDriver from tests.mocks.mock_tool.tool import MockTool @@ -50,7 +50,7 @@ def test_run_with_tools(self, mock_config): mock_config.drivers_config.prompt_driver = MockPromptDriver(max_attempts=1, use_native_tools=True) pipeline = Pipeline() - pipeline.add_task(ToolkitTask(tools=[MockTool()])) + pipeline.add_task(PromptTask(tools=[MockTool()])) output = pipeline.run().output_task.output assert isinstance(output, TextArtifact) @@ -60,7 +60,7 @@ def test_run_with_tools_and_stream(self, mock_config): mock_config.driver = MockPromptDriver(max_attempts=1, stream=True, use_native_tools=True) pipeline = Pipeline() - pipeline.add_task(ToolkitTask(tools=[MockTool()])) + pipeline.add_task(PromptTask(tools=[MockTool()])) output = pipeline.run().output_task.output assert isinstance(output, TextArtifact) diff --git a/tests/unit/events/test_event_listener.py 
b/tests/unit/events/test_event_listener.py index d26107bc6..9a48360dc 100644 --- a/tests/unit/events/test_event_listener.py +++ b/tests/unit/events/test_event_listener.py @@ -19,7 +19,7 @@ ) from griptape.events.base_event import BaseEvent from griptape.structures import Pipeline -from griptape.tasks import ActionsSubtask, ToolkitTask +from griptape.tasks import ActionsSubtask, PromptTask from tests.mocks.mock_event import MockEvent from tests.mocks.mock_event_listener_driver import MockEventListenerDriver from tests.mocks.mock_prompt_driver import MockPromptDriver @@ -30,7 +30,7 @@ class TestEventListener: @pytest.fixture() def pipeline(self, mock_config): mock_config.drivers_config.prompt_driver = MockPromptDriver(stream=True, use_native_tools=True) - task = ToolkitTask("test", tools=[MockTool(name="Tool1")]) + task = PromptTask("test", tools=[MockTool(name="Tool1")]) pipeline = Pipeline() pipeline.add_task(task) diff --git a/tests/unit/events/test_finish_actions_subtask_event.py b/tests/unit/events/test_finish_actions_subtask_event.py index c59a0eea7..06b7c9dd8 100644 --- a/tests/unit/events/test_finish_actions_subtask_event.py +++ b/tests/unit/events/test_finish_actions_subtask_event.py @@ -2,7 +2,7 @@ from griptape.events import FinishActionsSubtaskEvent from griptape.structures import Agent -from griptape.tasks import ActionsSubtask, ToolkitTask +from griptape.tasks import ActionsSubtask, PromptTask from tests.mocks.mock_tool.tool import MockTool @@ -15,7 +15,7 @@ def finish_subtask_event(self): "<|Response|>: test observation\n" "Answer: test output" ) - task = ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) agent = Agent() agent.add_task(task) subtask = ActionsSubtask(valid_input) diff --git a/tests/unit/events/test_start_actions_subtask_event.py b/tests/unit/events/test_start_actions_subtask_event.py index fc709bde1..ff571c85e 100644 --- a/tests/unit/events/test_start_actions_subtask_event.py +++ 
b/tests/unit/events/test_start_actions_subtask_event.py @@ -2,7 +2,7 @@ from griptape.events import StartActionsSubtaskEvent from griptape.structures import Agent -from griptape.tasks import ActionsSubtask, ToolkitTask +from griptape.tasks import ActionsSubtask, PromptTask from tests.mocks.mock_tool.tool import MockTool @@ -15,7 +15,7 @@ def start_subtask_event(self): "<|Response|>: test observation\n" "Answer: test output" ) - task = ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) agent = Agent() agent.add_task(task) subtask = ActionsSubtask(valid_input) diff --git a/tests/unit/structures/test_agent.py b/tests/unit/structures/test_agent.py index 387910f40..ecd1ac6dc 100644 --- a/tests/unit/structures/test_agent.py +++ b/tests/unit/structures/test_agent.py @@ -7,7 +7,7 @@ from griptape.memory.task.storage import TextArtifactStorage from griptape.rules import Rule, Ruleset from griptape.structures import Agent -from griptape.tasks import BaseTask, PromptTask, ToolkitTask +from griptape.tasks import BaseTask, PromptTask from tests.mocks.mock_prompt_driver import MockPromptDriver from tests.mocks.mock_tool.tool import MockTool @@ -23,7 +23,7 @@ def test_init(self): assert agent.rulesets[0].name == "TestRuleset" assert agent.rulesets[0].rules[0].value == "test" assert isinstance(agent.conversation_memory, ConversationMemory) - assert isinstance(Agent(tools=[MockTool()]).task, ToolkitTask) + assert isinstance(Agent(tools=[MockTool()]).task, PromptTask) def test_rulesets(self): agent = Agent(rulesets=[Ruleset("Foo", [Rule("foo test")])]) diff --git a/tests/unit/structures/test_pipeline.py b/tests/unit/structures/test_pipeline.py index 82dd8604b..d461b5bbf 100644 --- a/tests/unit/structures/test_pipeline.py +++ b/tests/unit/structures/test_pipeline.py @@ -6,7 +6,7 @@ from griptape.memory.structure import ConversationMemory from griptape.rules import Rule, Ruleset from griptape.structures import Pipeline -from griptape.tasks import BaseTask, 
CodeExecutionTask, PromptTask, ToolkitTask +from griptape.tasks import BaseTask, CodeExecutionTask, PromptTask from griptape.tokenizers import OpenAiTokenizer from tests.mocks.mock_tool.tool import MockTool @@ -82,9 +82,9 @@ def test_rules_and_rulesets(self): def test_with_no_task_memory(self): pipeline = Pipeline() - pipeline.add_task(ToolkitTask(tools=[MockTool()])) + pipeline.add_task(PromptTask(tools=[MockTool()])) - assert isinstance(pipeline.tasks[0], ToolkitTask) + assert isinstance(pipeline.tasks[0], PromptTask) assert pipeline.tasks[0].tools[0].input_memory is not None assert pipeline.tasks[0].tools[0].input_memory[0] == pipeline.task_memory assert pipeline.tasks[0].tools[0].output_memory is None @@ -92,9 +92,9 @@ def test_with_no_task_memory(self): def test_with_task_memory(self): pipeline = Pipeline() - pipeline.add_task(ToolkitTask(tools=[MockTool(off_prompt=True)])) + pipeline.add_task(PromptTask(tools=[MockTool(off_prompt=True)])) - assert isinstance(pipeline.tasks[0], ToolkitTask) + assert isinstance(pipeline.tasks[0], PromptTask) assert pipeline.tasks[0].task_memory == pipeline.task_memory assert pipeline.tasks[0].tools[0].input_memory is not None assert pipeline.tasks[0].tools[0].input_memory[0] == pipeline.task_memory @@ -104,17 +104,17 @@ def test_with_task_memory(self): def test_with_task_memory_and_empty_tool_output_memory(self): pipeline = Pipeline() - pipeline.add_task(ToolkitTask(tools=[MockTool(output_memory={}, off_prompt=True)])) + pipeline.add_task(PromptTask(tools=[MockTool(output_memory={}, off_prompt=True)])) - assert isinstance(pipeline.tasks[0], ToolkitTask) + assert isinstance(pipeline.tasks[0], PromptTask) assert pipeline.tasks[0].tools[0].output_memory == {} def test_without_task_memory(self): pipeline = Pipeline(task_memory=None) - pipeline.add_task(ToolkitTask(tools=[MockTool()])) + pipeline.add_task(PromptTask(tools=[MockTool()])) - assert isinstance(pipeline.tasks[0], ToolkitTask) + assert isinstance(pipeline.tasks[0], 
PromptTask) assert pipeline.tasks[0].tools[0].input_memory is None assert pipeline.tasks[0].tools[0].output_memory is None diff --git a/tests/unit/structures/test_structure.py b/tests/unit/structures/test_structure.py index 088e60d19..3b0e508e0 100644 --- a/tests/unit/structures/test_structure.py +++ b/tests/unit/structures/test_structure.py @@ -73,6 +73,8 @@ def test_to_dict(self): "max_meta_memory_entries": agent.tasks[0].max_meta_memory_entries, "context": agent.tasks[0].context, "rulesets": [], + "max_subtasks": 20, + "tools": [], "prompt_driver": { "extra_params": {}, "max_tokens": None, diff --git a/tests/unit/structures/test_workflow.py b/tests/unit/structures/test_workflow.py index fe0bbc1eb..a40b20b93 100644 --- a/tests/unit/structures/test_workflow.py +++ b/tests/unit/structures/test_workflow.py @@ -6,7 +6,7 @@ from griptape.memory.structure import ConversationMemory from griptape.rules import Rule, Ruleset from griptape.structures import Workflow -from griptape.tasks import BaseTask, CodeExecutionTask, PromptTask, ToolkitTask +from griptape.tasks import BaseTask, CodeExecutionTask, PromptTask from tests.mocks.mock_tool.tool import MockTool @@ -80,9 +80,9 @@ def test_rules_and_rulesets(self): def test_with_no_task_memory(self): workflow = Workflow() - workflow.add_task(ToolkitTask(tools=[MockTool()])) + workflow.add_task(PromptTask(tools=[MockTool()])) - assert isinstance(workflow.tasks[0], ToolkitTask) + assert isinstance(workflow.tasks[0], PromptTask) assert workflow.tasks[0].tools[0].input_memory is not None assert workflow.tasks[0].tools[0].input_memory[0] == workflow.task_memory assert workflow.tasks[0].tools[0].output_memory is None @@ -90,9 +90,9 @@ def test_with_no_task_memory(self): def test_with_task_memory(self): workflow = Workflow() - workflow.add_task(ToolkitTask(tools=[MockTool(off_prompt=True)])) + workflow.add_task(PromptTask(tools=[MockTool(off_prompt=True)])) - assert isinstance(workflow.tasks[0], ToolkitTask) + assert 
isinstance(workflow.tasks[0], PromptTask) assert workflow.tasks[0].tools[0].input_memory is not None assert workflow.tasks[0].tools[0].input_memory[0] == workflow.task_memory assert workflow.tasks[0].tools[0].output_memory is not None @@ -101,17 +101,17 @@ def test_with_task_memory(self): def test_with_task_memory_and_empty_tool_output_memory(self): workflow = Workflow() - workflow.add_task(ToolkitTask(tools=[MockTool(output_memory={}, off_prompt=True)])) + workflow.add_task(PromptTask(tools=[MockTool(output_memory={}, off_prompt=True)])) - assert isinstance(workflow.tasks[0], ToolkitTask) + assert isinstance(workflow.tasks[0], PromptTask) assert workflow.tasks[0].tools[0].output_memory == {} def test_without_task_memory(self): workflow = Workflow(task_memory=None) - workflow.add_task(ToolkitTask(tools=[MockTool()])) + workflow.add_task(PromptTask(tools=[MockTool()])) - assert isinstance(workflow.tasks[0], ToolkitTask) + assert isinstance(workflow.tasks[0], PromptTask) assert workflow.tasks[0].tools[0].input_memory is None assert workflow.tasks[0].tools[0].output_memory is None diff --git a/tests/unit/tasks/test_actions_subtask.py b/tests/unit/tasks/test_actions_subtask.py index b9c692315..e7d44b5af 100644 --- a/tests/unit/tasks/test_actions_subtask.py +++ b/tests/unit/tasks/test_actions_subtask.py @@ -6,7 +6,7 @@ from griptape.artifacts.error_artifact import ErrorArtifact from griptape.common import ToolAction from griptape.structures import Agent -from griptape.tasks import ActionsSubtask, ToolkitTask +from griptape.tasks import ActionsSubtask, PromptTask from tests.mocks.mock_tool.tool import MockTool @@ -19,7 +19,7 @@ def test_prompt_input(self): "Answer: test output" ) - task = ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) json_dict = json.loads(subtask.actions_to_json()) @@ -40,7 +40,7 @@ def test_artifact_input(self): TextArtifact("answer"), ] ) - task = 
ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) json_dict = json.loads(subtask.actions_to_json()) @@ -60,7 +60,7 @@ def test_artifact_action_and_thought_input(self): ), ] ) - task = ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) json_dict = json.loads(subtask.actions_to_json()) @@ -73,7 +73,7 @@ def test_artifact_action_and_thought_input(self): def test_prompt_answer(self): valid_input = "Answer: test output" - task = ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) @@ -84,7 +84,7 @@ def test_prompt_answer(self): def test_prompt_implicit_answer(self): valid_input = "test output" - task = ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) @@ -98,7 +98,7 @@ def test_artifact_answer(self): TextArtifact("answer"), ] ) - task = ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) @@ -115,7 +115,7 @@ def test_callable_input(self): ), ] ) - task = ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(lambda task: valid_input)) json_dict = json.loads(subtask.actions_to_json()) @@ -133,7 +133,7 @@ def test_input_with_multiline_actions(self): "Answer: test output" ) - task = ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) json_dict = json.loads(subtask.actions_to_json()) @@ -153,7 +153,7 @@ def test_with_no_action_input(self): "Answer: test output" ) - task = ToolkitTask(tools=[MockTool()]) + 
task = PromptTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) json_dict = json.loads(subtask.actions_to_json()) @@ -165,7 +165,7 @@ def test_with_no_action_input(self): def test_no_actions(self): valid_input = "Thought: need to test\n" "<|Response|>: test observation\n" "Answer: test output" - task = ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) json_dict = json.loads(subtask.actions_to_json()) @@ -175,7 +175,7 @@ def test_no_actions(self): def test_empty_actions(self): valid_input = "Thought: need to test\n" "Actions: []\n" "<|Response|>: test observation\n" "Answer: test output" - task = ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) json_dict = json.loads(subtask.actions_to_json()) @@ -187,7 +187,7 @@ def test_invalid_actions(self): "Thought: need to test\n" "Actions: [{,{]\n" "<|Response|>: test observation\n" "Answer: test output" ) - task = ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(invalid_input)) @@ -204,7 +204,7 @@ def test_implicit_values(self): "Answer: test output" ) - task = ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) json_dict = json.loads(subtask.actions_to_json()) @@ -219,7 +219,7 @@ def test_execute_tool(self): 'Actions:[{"tag": "foo", "name": "MockTool","path": "test","input": {"values": {"test": "value"}}}]' ) - task = ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) subtask.run() @@ -234,7 +234,7 @@ def test_execute_tool_exception(self): 'Actions:[{"tag": "foo", "name": "MockTool","path": 
"test_exception","input": {"values": {"test": "value"}}}]' ) - task = ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) subtask.run() @@ -249,7 +249,7 @@ def test_origin_task(self): 'Actions:[{"tag": "foo", "name": "MockTool","path": "test","input": {"values": {"test": "value"}}}]' ) - task = ToolkitTask(tools=[MockTool()]) + task = PromptTask(tools=[MockTool()]) Agent().add_task(task) subtask = task.add_subtask(ActionsSubtask(valid_input)) diff --git a/tests/unit/tasks/test_toolkit_task.py b/tests/unit/tasks/test_toolkit_task.py index a8c51d7c1..3c17ff479 100644 --- a/tests/unit/tasks/test_toolkit_task.py +++ b/tests/unit/tasks/test_toolkit_task.py @@ -1,7 +1,9 @@ +import pytest + from griptape.artifacts import ErrorArtifact, TextArtifact from griptape.common import ToolAction from griptape.structures import Agent -from griptape.tasks import ActionsSubtask, PromptTask, ToolkitTask +from griptape.tasks import ActionsSubtask, ToolkitTask from tests.mocks.mock_tool.tool import MockTool from tests.utils import defaults @@ -365,7 +367,7 @@ def test_meta_memory(self): agent.add_task(task) - system_template = task.generate_system_template(PromptTask()) + system_template = task.generate_system_template(ToolkitTask()) assert "You have access to additional contextual information" in system_template @@ -421,3 +423,9 @@ def test_from_dict(self): serialized_task["prompt_driver"]["module_name"] = "tests.mocks.mock_prompt_driver" assert ToolkitTask.from_dict(serialized_task).to_dict() == task.to_dict() + + def test_deprecation_warning(self): + task = ToolkitTask("test") + + with pytest.warns(DeprecationWarning): + task.run() diff --git a/tests/unit/tools/test_base_tool.py b/tests/unit/tools/test_base_tool.py index 097279a80..9c319ed4e 100644 --- a/tests/unit/tools/test_base_tool.py +++ b/tests/unit/tools/test_base_tool.py @@ -9,7 +9,7 @@ from griptape.artifacts.info_artifact 
import InfoArtifact from griptape.common import ToolAction -from griptape.tasks import ActionsSubtask, ToolkitTask +from griptape.tasks import ActionsSubtask, PromptTask from griptape.tools import BaseTool from griptape.utils.decorators import activity from tests.mocks.mock_tool.tool import MockTool @@ -190,13 +190,13 @@ def tool(self): def test_off_prompt(self, tool): assert ( - not ToolkitTask(task_memory=defaults.text_task_memory("TestMemory"), tools=[MockTool()]) + not PromptTask(task_memory=defaults.text_task_memory("TestMemory"), tools=[MockTool()]) .tools[0] .output_memory ) assert ( - ToolkitTask(task_memory=defaults.text_task_memory("TestMemory"), tools=[MockTool(off_prompt=True)]) + PromptTask(task_memory=defaults.text_task_memory("TestMemory"), tools=[MockTool(off_prompt=True)]) .tools[0] .output_memory )