From 60bcd79711c7fdf13d7ff7fa648c607c350db839 Mon Sep 17 00:00:00 2001
From: Collin Dutter
Date: Fri, 16 Aug 2024 11:19:14 -0700
Subject: [PATCH 1/9] Clean up Workflow threads (#1067)

---
 CHANGELOG.md                    |  5 +++++
 griptape/structures/workflow.py | 31 ++++++++++++++++---------------
 2 files changed, 21 insertions(+), 15 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3b1683472..e06aa4c6d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## Unreleased
 
+## [0.29.2] - 2024-08-16
+
+### Fixed
+- `Workflow` threads not being properly cleaned up after completion.
+
 ## [0.29.1] - 2024-08-02
 
 ### Changed
diff --git a/griptape/structures/workflow.py b/griptape/structures/workflow.py
index 2ecfb8676..89f8305ca 100644
--- a/griptape/structures/workflow.py
+++ b/griptape/structures/workflow.py
@@ -87,21 +87,22 @@ def insert_task(
     def try_run(self, *args) -> Workflow:
         exit_loop = False
 
-        while not self.is_finished() and not exit_loop:
-            futures_list = {}
-            ordered_tasks = self.order_tasks()
-
-            for task in ordered_tasks:
-                if task.can_execute():
-                    future = self.futures_executor_fn().submit(task.execute)
-                    futures_list[future] = task
-
-            # Wait for all tasks to complete
-            for future in futures.as_completed(futures_list):
-                if isinstance(future.result(), ErrorArtifact) and self.fail_fast:
-                    exit_loop = True
-
-                    break
+        with self.futures_executor_fn() as executor:
+            while not self.is_finished() and not exit_loop:
+                futures_list = {}
+                ordered_tasks = self.order_tasks()
+
+                for task in ordered_tasks:
+                    if task.can_execute():
+                        future = executor.submit(task.execute)
+                        futures_list[future] = task
+
+                # Wait for all tasks to complete
+                for future in futures.as_completed(futures_list):
+                    if isinstance(future.result(), ErrorArtifact) and self.fail_fast:
+                        exit_loop = True
+
+                        break
 
         if self.conversation_memory and self.output is not None:
             run = Run(input=self.input_task.input, output=self.output)

From f1d8a399a631d6ec572d19064038cac39308fb68 Mon Sep 17 00:00:00 2001
From: Collin Dutter
Date: Fri, 16 Aug 2024 12:06:00 -0700
Subject: [PATCH 2/9] Use subtask output if no action output (#1068)

---
 CHANGELOG.md                   | 1 +
 griptape/tasks/toolkit_task.py | 7 ++++++-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e06aa4c6d..33ccbc3d5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### Fixed
 - `Workflow` threads not being properly cleaned up after completion.
+- Crash when `ToolAction`s were missing output due to an `ActionsSubtask` exception.
 
 ## [0.29.1] - 2024-08-02
 
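For context on the `workflow.py` change in patch 1/9 above: a `concurrent.futures` executor only joins its worker threads when `shutdown()` is called, and using the executor as a context manager calls `shutdown(wait=True)` automatically on exit. The old code appears to create an executor per `submit()` call inside the loop and never shuts any of them down, which is how the threads leaked. A minimal sketch of the pattern outside of Griptape (the task list and function name here are made up for illustration, not the library's API):

```python
from concurrent import futures


def run_all(tasks):
    # Entering the `with` block creates the thread pool; leaving it calls
    # executor.shutdown(wait=True), so every worker thread is joined instead
    # of lingering after the tasks finish.
    with futures.ThreadPoolExecutor() as executor:
        futures_to_task = {executor.submit(task): task for task in tasks}
        for future in futures.as_completed(futures_to_task):
            print(future.result())


run_all([lambda i=i: i * i for i in range(3)])
```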
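The `toolkit_task.py` diff below (patch 2/9) applies the fallback described in the changelog entry above: when a `ToolAction` never produced output, for example because the `ActionsSubtask` raised before the action ran, the subtask's own output is used instead of `None`. A simplified sketch of the same pattern with hypothetical stand-in classes (not the real griptape types):

```python
from __future__ import annotations

from dataclasses import dataclass, field
from typing import Optional


@dataclass
class ToolAction:
    name: str
    tag: str
    path: Optional[str] = None
    output: Optional[str] = None


@dataclass
class Subtask:
    actions: list[ToolAction] = field(default_factory=list)
    output: Optional[str] = None  # e.g. the error message when the subtask failed


def action_results(subtask: Subtask) -> list[ToolAction]:
    # Fall back to the subtask's output when an action has none, so downstream
    # prompt construction never sees a missing result.
    return [
        ToolAction(
            name=action.name,
            tag=action.tag,
            path=action.path,
            output=action.output if action.output is not None else subtask.output,
        )
        for action in subtask.actions
    ]
```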
diff --git a/griptape/tasks/toolkit_task.py b/griptape/tasks/toolkit_task.py
index 59d8f9f90..cb6058141 100644
--- a/griptape/tasks/toolkit_task.py
+++ b/griptape/tasks/toolkit_task.py
@@ -83,7 +83,12 @@ def prompt_stack(self) -> PromptStack:
                     for action in s.actions
                 ]
                 action_results = [
-                    ToolAction(name=action.name, path=action.path, tag=action.tag, output=action.output)
+                    ToolAction(
+                        name=action.name,
+                        path=action.path,
+                        tag=action.tag,
+                        output=action.output if action.output is not None else s.output,
+                    )
                     for action in s.actions
                 ]
 

From 694232c92c78fd99ac9ea264ba9cf52531d6f7dd Mon Sep 17 00:00:00 2001
From: Collin Dutter
Date: Fri, 16 Aug 2024 12:10:33 -0700
Subject: [PATCH 3/9] Version bump v0.29.2 (#1070)

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index a07745e3f..65b7f6340 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "griptape"
-version = "0.29.1"
+version = "0.29.2"
 description = "Modular Python framework for LLM workflows, tools, memory, and data."
 authors = ["Griptape "]
 license = "Apache 2.0"

From a2520d3fdf2eec6a1a8cf43b957ada75e3baf7be Mon Sep 17 00:00:00 2001
From: Collin Dutter
Date: Tue, 20 Aug 2024 12:19:37 -0700
Subject: [PATCH 4/9] Release/v0.30.0 (#1088)

Co-authored-by: Andrew French
Co-authored-by: Vasily Vasinov
Co-authored-by: Matt Vallillo
Co-authored-by: dylanholmes <4370153+dylanholmes@users.noreply.github.com>
Co-authored-by: Michal
Co-authored-by: Zach Giordano <32624672+zachgiordano@users.noreply.github.com>
Co-authored-by: Ikko Eltociear Ashimine
Co-authored-by: torabshaikh
Co-authored-by: Aodhan Roche
Co-authored-by: Kyle Roche
Co-authored-by: Emily Danielson <2302515+emjay07@users.noreply.github.com>
Co-authored-by: CJ Kindel
Co-authored-by: hkhajgiwale
Co-authored-by: Harsh Khajgiwale <13365920+hkhajgiwale@users.noreply.github.com>
Co-authored-by: Anush
Co-authored-by: datashaman
Co-authored-by: Stefano Lottini
Co-authored-by: James Clarendon
---
 .github/ISSUE_TEMPLATE/bug_report.md | 6 +-
 .../actions/init-bare-environment/action.yml | 2 +-
 .github/actions/init-environment/action.yml | 2 +-
 .github/workflows/docs-integration-tests.yml | 24 +-
 CHANGELOG.md | 72 +-
 Makefile | 3 +-
 README.md | 81 +-
 _typos.toml | 3 +
 docs/assets/css/extra.css | 5 -
 docs/examples/amazon-dynamodb-sessions.md | 35 +-
 docs/examples/load-and-query-pinecone.md | 43 +-
 docs/examples/load-query-and-chat-marqo.md | 44 +-
 docs/examples/multi-agent-workflow.md | 183 +-
 docs/examples/multiple-agent-shared-memory.md | 64 +-
 docs/examples/query-webpage-astra-db.md | 16 +
 docs/examples/query-webpage.md | 29 +-
 .../src/amazon_dynamodb_sessions_1.py | 34 +
 .../examples/src/load_and_query_pinecone_1.py | 44 +
 .../src/load_query_and_chat_marqo_1.py | 44 +
 docs/examples/src/multi_agent_workflow_1.py | 171 ++
 .../src/multiple_agent_shared_memory_1.py | 60 +
 docs/examples/src/query_webpage_1.py | 20 +
 docs/examples/src/query_webpage_astra_db_1.py | 57 +
 docs/examples/src/talk_to_a_pdf_1.py | 42 +
 docs/examples/src/talk_to_a_video_1.py | 28 +
 docs/examples/src/talk_to_a_webpage_1.py | 51 +
 docs/examples/src/talk_to_redshift_1.py | 43 +
 docs/examples/talk-to-a-pdf.md | 55 +-
 docs/examples/talk-to-a-video.md | 26 +-
 docs/examples/talk-to-a-webpage.md | 69 +-
 docs/examples/talk-to-redshift.md | 42 +-
 docs/griptape-framework/data/artifacts.md | 30 +-
 docs/griptape-framework/data/chunkers.md | 9 +-
 docs/griptape-framework/data/loaders.md | 138 +-
.../griptape-framework/data/src}/__init__.py | 0 .../griptape-framework/data/src/chunkers_1.py | 9 + docs/griptape-framework/data/src/loaders_1.py | 19 + .../griptape-framework/data/src/loaders_10.py | 10 + docs/griptape-framework/data/src/loaders_2.py | 8 + docs/griptape-framework/data/src/loaders_3.py | 16 + docs/griptape-framework/data/src/loaders_4.py | 13 + docs/griptape-framework/data/src/loaders_5.py | 12 + docs/griptape-framework/data/src/loaders_6.py | 5 + docs/griptape-framework/data/src/loaders_7.py | 9 + docs/griptape-framework/data/src/loaders_8.py | 16 + docs/griptape-framework/data/src/loaders_9.py | 7 + .../drivers/audio-transcription-drivers.md | 19 +- .../drivers/conversation-memory-drivers.md | 58 +- .../drivers/embedding-drivers.md | 127 +- .../drivers/event-listener-drivers.md | 194 +- .../drivers/image-generation-drivers.md | 212 +- .../drivers/image-query-drivers.md | 108 +- .../drivers/observability-drivers.md | 30 +- .../drivers/prompt-drivers.md | 305 +- .../griptape-framework/drivers/sql-drivers.md | 41 +- .../src/audio_transcription_drivers_1.py | 15 + .../src/conversation_memory_drivers_1.py | 9 + .../src/conversation_memory_drivers_2.py | 19 + .../src/conversation_memory_drivers_3.py | 20 + ...versation_memory_drivers_griptape_cloud.py | 15 + .../drivers/src/embedding_drivers_1.py | 6 + .../drivers/src/embedding_drivers_10.py | 24 + .../drivers/src/embedding_drivers_2.py | 11 + .../drivers/src/embedding_drivers_3.py | 6 + .../drivers/src/embedding_drivers_4.py | 6 + .../drivers/src/embedding_drivers_5.py | 18 + .../drivers/src/embedding_drivers_6.py | 10 + .../drivers/src/embedding_drivers_7.py | 13 + .../drivers/src/embedding_drivers_8.py | 10 + .../drivers/src/embedding_drivers_9.py | 14 + .../drivers/src/event_listener_drivers_1.py | 32 + .../drivers/src/event_listener_drivers_2.py | 14 + .../drivers/src/event_listener_drivers_3.py | 31 + .../drivers/src/event_listener_drivers_4.py | 27 + .../drivers/src/event_listener_drivers_5.py | 17 + .../drivers/src/event_listener_drivers_6.py | 20 + .../drivers/src/event_listener_drivers_7.py | 26 + .../drivers/src/image_generation_drivers_1.py | 18 + .../drivers/src/image_generation_drivers_2.py | 23 + .../drivers/src/image_generation_drivers_3.py | 21 + .../drivers/src/image_generation_drivers_4.py | 23 + .../drivers/src/image_generation_drivers_5.py | 23 + .../drivers/src/image_generation_drivers_6.py | 19 + .../drivers/src/image_generation_drivers_7.py | 21 + .../drivers/src/image_generation_drivers_8.py | 30 + .../drivers/src/image_generation_drivers_9.py | 31 + .../drivers/src/image_query_drivers_1.py | 18 + .../drivers/src/image_query_drivers_2.py | 22 + .../drivers/src/image_query_drivers_3.py | 18 + .../drivers/src/image_query_drivers_4.py | 22 + .../drivers/src/image_query_drivers_5.py | 24 + .../drivers/src/observability_drivers_1.py | 10 + .../drivers/src/observability_drivers_2.py | 14 + .../drivers/src/prompt_drivers_1.py | 11 + .../drivers/src/prompt_drivers_10.py | 11 + .../drivers/src/prompt_drivers_11.py | 26 + .../drivers/src/prompt_drivers_12.py | 13 + .../drivers/src/prompt_drivers_13.py | 17 + .../drivers/src/prompt_drivers_14.py | 15 + .../drivers/src/prompt_drivers_2.py | 20 + .../drivers/src/prompt_drivers_3.py | 19 + .../drivers/src/prompt_drivers_4.py | 12 + .../drivers/src/prompt_drivers_5.py | 22 + .../drivers/src/prompt_drivers_6.py | 13 + .../drivers/src/prompt_drivers_7.py | 13 + .../drivers/src/prompt_drivers_8.py | 13 + .../drivers/src/prompt_drivers_9.py | 26 + 
.../drivers/src/sql_drivers_1.py | 5 + .../drivers/src/sql_drivers_2.py | 15 + .../drivers/src/sql_drivers_3.py | 22 + .../drivers/src/structure_run_drivers_1.py | 43 + .../drivers/src/structure_run_drivers_2.py | 41 + .../drivers/src/text_to_speech_drivers_1.py | 20 + .../drivers/src/text_to_speech_drivers_2.py | 14 + .../drivers/src/vector_store_drivers_1.py | 24 + .../drivers/src/vector_store_drivers_10.py | 41 + .../drivers/src/vector_store_drivers_11.py | 36 + .../drivers/src/vector_store_drivers_2.py | 17 + .../drivers/src/vector_store_drivers_3.py | 30 + .../drivers/src/vector_store_drivers_4.py | 39 + .../drivers/src/vector_store_drivers_5.py | 45 + .../drivers/src/vector_store_drivers_6.py | 45 + .../drivers/src/vector_store_drivers_7.py | 35 + .../drivers/src/vector_store_drivers_8.py | 36 + .../drivers/src/vector_store_drivers_9.py | 42 + .../drivers/src/web_scraper_drivers_1.py | 20 + .../drivers/src/web_scraper_drivers_2.py | 5 + .../drivers/src/web_scraper_drivers_3.py | 14 + .../drivers/src/web_scraper_drivers_4.py | 5 + .../drivers/src/web_search_drivers_1.py | 10 + .../drivers/src/web_search_drivers_2.py | 18 + .../drivers/src/web_search_drivers_3.py | 5 + .../drivers/structure-run-drivers.md | 89 +- .../drivers/text-to-speech-drivers.md | 37 +- .../drivers/vector-store-drivers.md | 328 +- .../drivers/web-scraper-drivers.md | 52 +- .../drivers/web-search-drivers.md | 34 +- .../engines/audio-engines.md | 37 +- .../engines/extraction-engines.md | 47 +- .../engines/image-generation-engines.md | 127 +- .../engines/image-query-engines.md | 18 +- .../griptape-framework/engines/rag-engines.md | 67 +- .../engines/src}/__init__.py | 0 .../engines/src/audio_engines_1.py | 18 + .../engines/src/audio_engines_2.py | 13 + .../engines/src/extraction_engines_1.py | 24 + .../engines/src/extraction_engines_2.py | 27 + .../engines/src/image_generation_engines_1.py | 23 + .../engines/src/image_generation_engines_2.py | 17 + .../engines/src/image_generation_engines_3.py | 23 + .../engines/src/image_generation_engines_4.py | 26 + .../engines/src/image_generation_engines_5.py | 26 + .../engines/src/image_query_engines_1.py | 13 + .../engines/src/rag_engines_1.py | 47 + .../engines/src/summary_engines_1.py | 20 + .../engines/summary-engines.md | 18 +- docs/griptape-framework/index.md | 165 +- docs/griptape-framework/misc/events.md | 145 +- .../griptape-framework/misc/src}/__init__.py | 0 docs/griptape-framework/misc/src/events_1.py | 37 + docs/griptape-framework/misc/src/events_2.py | 22 + docs/griptape-framework/misc/src/events_3.py | 27 + docs/griptape-framework/misc/src/events_4.py | 22 + docs/griptape-framework/misc/src/events_5.py | 27 + docs/griptape-framework/misc/src/events_6.py | 16 + .../misc/src/tokenizers_1.py | 7 + .../misc/src/tokenizers_2.py | 11 + .../misc/src/tokenizers_3.py | 7 + .../misc/src/tokenizers_4.py | 9 + .../misc/src/tokenizers_5.py | 10 + .../misc/src/tokenizers_6.py | 7 + .../misc/src/tokenizers_7.py | 7 + docs/griptape-framework/misc/tokenizers.md | 70 +- .../griptape-framework/src}/__init__.py | 0 docs/griptape-framework/src/index_1.py | 5 + docs/griptape-framework/src/index_2.py | 4 + docs/griptape-framework/src/index_3.py | 9 + docs/griptape-framework/src/index_4.py | 20 + docs/griptape-framework/structures/agents.md | 32 +- docs/griptape-framework/structures/config.md | 170 -- docs/griptape-framework/structures/configs.md | 89 + .../structures/conversation-memory.md | 51 +- .../structures/observability.md | 43 +- .../structures/pipelines.md | 17 +- 
.../griptape-framework/structures/rulesets.md | 103 +- .../structures/src}/__init__.py | 0 .../structures/src/agents_1.py | 7 + .../structures/src/agents_2.py | 11 + .../structures/src/conversation_memory_1.py | 6 + .../structures/src/conversation_memory_2.py | 3 + .../structures/src/conversation_memory_3.py | 8 + .../structures/src/conversation_memory_4.py | 13 + .../structures/src/conversation_memory_5.py | 11 + .../structures/src/drivers_config_1.py | 7 + .../structures/src/drivers_config_2.py | 11 + .../structures/src/drivers_config_3.py | 17 + .../structures/src/drivers_config_4.py | 7 + .../structures/src/drivers_config_5.py | 7 + .../structures/src/drivers_config_6.py | 9 + .../structures/src/drivers_config_7.py | 16 + .../structures/src/drivers_config_8.py | 18 + .../structures/src/logging_config.py | 14 + .../structures/src/observability_1.py | 10 + .../structures/src/observability_2.py | 29 + .../structures/src/pipelines_1.py | 13 + .../structures/src/rulesets_1.py | 30 + .../structures/src/rulesets_2.py | 16 + .../structures/src/rulesets_3.py | 32 + .../structures/src/rulesets_4.py | 16 + .../structures/src/task_memory_1.py | 7 + .../structures/src/task_memory_2.py | 7 + .../structures/src/task_memory_3.py | 7 + .../structures/src/task_memory_4.py | 9 + .../structures/src/task_memory_5.py | 13 + .../structures/src/task_memory_6.py | 31 + .../structures/src/task_memory_7.py | 12 + .../structures/src/task_memory_8.py | 14 + .../structures/src/task_memory_9.py | 9 + .../structures/src/tasks_1.py | 12 + .../structures/src/tasks_10.py | 22 + .../structures/src/tasks_11.py | 30 + .../structures/src/tasks_12.py | 35 + .../structures/src/tasks_13.py | 35 + .../structures/src/tasks_14.py | 37 + .../structures/src/tasks_15.py | 34 + .../structures/src/tasks_16.py | 130 + .../structures/src/tasks_17.py | 20 + .../structures/src/tasks_18.py | 17 + .../structures/src/tasks_2.py | 10 + .../structures/src/tasks_3.py | 10 + .../structures/src/tasks_4.py | 13 + .../structures/src/tasks_5.py | 10 + .../structures/src/tasks_6.py | 29 + .../structures/src/tasks_7.py | 29 + .../structures/src/tasks_8.py | 24 + .../structures/src/tasks_9.py | 39 + .../structures/src/workflows_1.py | 34 + .../structures/src/workflows_2.py | 14 + .../structures/src/workflows_3.py | 16 + .../structures/src/workflows_4.py | 19 + .../structures/src/workflows_5.py | 17 + .../structures/src/workflows_6.py | 17 + .../structures/src/workflows_7.py | 18 + .../structures/src/workflows_8.py | 19 + .../structures/src/workflows_9.py | 23 + .../structures/task-memory.md | 411 +-- docs/griptape-framework/structures/tasks.md | 674 +---- .../structures/workflows.md | 209 +- docs/griptape-framework/tools/index.md | 117 +- .../griptape-framework/tools/src}/__init__.py | 0 docs/griptape-framework/tools/src/index_1.py | 18 + docs/griptape-tools/custom-tools/index.md | 40 +- .../custom-tools/src/index_1.py | 20 + .../custom-tools/src/index_2.py | 28 + docs/griptape-tools/index.md | 28 +- .../audio-transcription-client.md | 24 - .../audio-transcription-tool.md | 7 + .../official-tools/aws-iam-client.md | 68 - .../official-tools/aws-iam-tool.md | 64 + .../official-tools/aws-s3-client.md | 66 - .../official-tools/aws-s3-tool.md | 41 + .../{calculator.md => calculator-tool.md} | 17 +- .../official-tools/computer-tool.md | 47 + .../griptape-tools/official-tools/computer.md | 107 - .../{date-time.md => date-time-tool.md} | 15 +- .../official-tools/email-client.md | 22 - .../official-tools/email-tool.md | 13 + 
.../official-tools/extraction-tool.md | 53 + .../{file-manager.md => file-manager-tool.md} | 27 +- .../official-tools/google-calendar-tool.md | 8 + ...gle-docs-client.md => google-docs-tool.md} | 39 +- ...e-drive-client.md => google-drive-tool.md} | 39 +- ...e-gmail-client.md => google-gmail-tool.md} | 40 +- .../griptape-cloud-knowledge-base-client.md | 25 - .../griptape-cloud-knowledge-base-tool.md | 9 + .../official-tools/image-query-tool.md | 7 + .../inpainting-image-generation-client.md | 32 - .../inpainting-image-generation-tool.md | 7 + .../official-tools/openweather-client.md | 19 - .../official-tools/openweather-tool.md | 7 + .../outpainting-image-generation-client.md | 32 - .../outpainting-image-generation-tool.md | 7 + .../prompt-image-generation-tool.md | 7 + .../official-tools/prompt-summary-tool.md | 105 + .../official-tools/query-tool.md | 85 + .../official-tools/rag-client.md | 76 - .../griptape-tools/official-tools/rag-tool.md | 32 + .../official-tools/rest-api-tool.md | 12 + .../official-tools/sql-client.md | 71 - .../griptape-tools/official-tools/sql-tool.md | 56 + .../src/audio_transcription_tool_1.py | 17 + .../official-tools/src/aws_iam_tool_1.py | 13 + .../official-tools/src/aws_s3_tool_1.py | 13 + .../official-tools/src/calculator_tool_1.py | 8 + .../official-tools/src/computer_tool_1.py | 10 + .../official-tools/src/date_time_tool_1.py | 8 + .../official-tools/src/email_tool_1.py | 11 + .../official-tools/src/extraction_tool_1.py | 28 + .../official-tools/src/file_manager_tool_1.py | 19 + .../google_calendar_tool_1.py} | 23 +- .../official-tools/src/google_docs_tool_1.py | 29 + .../official-tools/src/google_drive_tool_1.py | 29 + .../official-tools/src/google_gmail_tool_1.py | 30 + .../griptape_cloud_knowledge_base_tool_1.py | 18 + .../image_query_tool_1.py} | 18 +- .../src/inpainting_image_generation_tool_1.py | 26 + .../official-tools/src/openweather_tool_1.py | 14 + .../outpainting_image_generation_tool_1.py | 26 + .../prompt_image_generation_tool_1.py} | 16 +- .../src/prompt_summary_tool_1.py | 8 + .../official-tools/src/query_tool_1.py | 6 + .../official-tools/src/rag_tool_1.py | 37 + .../rest_api_tool_1.py} | 29 +- .../official-tools/src/sql_tool_1.py | 28 + .../src/structure_run_tool_1.py | 24 + .../src/text_to_speech_tool_1.py | 20 + .../src/variation_image_generation_tool_1.py | 27 + .../src/variation_image_generation_tool_2.py | 42 + .../official-tools/src/vector_store_tool_1.py | 25 + .../official-tools/src/web_scraper_1.py | 6 + .../official-tools/src/web_scraper_tool_1.py | 6 + .../official-tools/src/web_search_tool_1.py | 22 + .../official-tools/src/web_search_tool_2.py | 28 + ...re-run-client.md => structure-run-tool.md} | 33 +- .../official-tools/task-memory-client.md | 11 - .../official-tools/text-to-speech-client.md | 27 - .../official-tools/text-to-speech-tool.md | 7 + .../variation-image-generation-client.md | 83 - .../variation-image-generation-tool.md | 15 + .../official-tools/vector-store-client.md | 35 - .../official-tools/vector-store-tool.md | 9 + .../official-tools/web-scraper-tool.md | 97 + .../official-tools/web-scraper.md | 76 - .../{web-search.md => web-search-tool.md} | 33 +- docs/griptape-tools/src/index_1.py | 23 + docs/plugins/swagger_ui_plugin.py | 7 +- griptape/artifacts/__init__.py | 2 + griptape/artifacts/json_artifact.py | 21 + griptape/common/prompt_stack/prompt_stack.py | 18 +- griptape/config/__init__.py | 24 - .../config/amazon_bedrock_structure_config.py | 82 - griptape/config/anthropic_structure_config.py | 39 - 
.../config/azure_openai_structure_config.py | 107 - griptape/config/base_structure_config.py | 83 - griptape/config/cohere_structure_config.py | 39 - griptape/config/google_structure_config.py | 32 - griptape/config/openai_structure_config.py | 60 - griptape/config/structure_config.py | 68 - griptape/configs/__init__.py | 8 + griptape/{config => configs}/base_config.py | 4 +- griptape/configs/defaults_config.py | 23 + griptape/configs/drivers/__init__.py | 20 + .../drivers/amazon_bedrock_drivers_config.py | 60 + .../drivers/anthropic_drivers_config.py | 29 + .../drivers/azure_openai_drivers_config.py | 94 + .../configs/drivers/base_drivers_config.py | 81 + .../configs/drivers/cohere_drivers_config.py | 36 + griptape/configs/drivers/drivers_config.py | 64 + .../configs/drivers/google_drivers_config.py | 24 + .../configs/drivers/openai_drivers_config.py | 44 + griptape/configs/logging/__init__.py | 5 + griptape/configs/logging/logging_config.py | 17 + .../configs/logging/newline_logging_filter.py | 13 + .../logging/truncate_logging_filter.py | 17 + griptape/drivers/__init__.py | 4 + .../base_audio_transcription_driver.py | 10 +- .../openai_audio_transcription_driver.py | 2 +- .../embedding/base_embedding_driver.py | 4 +- .../base_event_listener_driver.py | 14 +- .../griptape_cloud_event_listener_driver.py | 7 +- .../amazon_s3_file_manager_driver.py | 9 +- .../base_image_generation_driver.py | 10 +- .../openai_image_generation_driver.py | 2 +- ...diffusion_image_generation_model_driver.py | 6 +- .../anthropic_image_query_driver.py | 4 +- .../image_query/base_image_query_driver.py | 10 +- .../image_query/openai_image_query_driver.py | 2 +- ...bedrock_claude_image_query_model_driver.py | 4 +- ...zon_dynamodb_conversation_memory_driver.py | 12 +- .../base_conversation_memory_driver.py | 4 +- ...iptape_cloud_conversation_memory_driver.py | 124 + .../local_conversation_memory_driver.py | 15 +- .../redis_conversation_memory_driver.py | 13 +- .../griptape_cloud_observability_driver.py | 6 +- griptape/drivers/prompt/base_prompt_driver.py | 28 +- .../drivers/prompt/cohere_prompt_driver.py | 4 + .../drivers/prompt/google_prompt_driver.py | 12 +- .../drivers/sql/amazon_redshift_sql_driver.py | 3 +- griptape/drivers/sql/sql_driver.py | 1 + .../griptape_cloud_structure_run_driver.py | 6 +- .../base_text_to_speech_driver.py | 9 +- .../openai_text_to_speech_driver.py | 2 +- .../vector/astradb_vector_store_driver.py | 184 ++ .../vector/base_vector_store_driver.py | 49 +- ...loud_knowledge_base_vector_store_driver.py | 3 +- .../vector/local_vector_store_driver.py | 32 +- .../mongodb_atlas_vector_store_driver.py | 4 +- .../vector/opensearch_vector_store_driver.py | 6 +- .../vector/qdrant_vector_store_driver.py | 3 +- .../markdownify_web_scraper_driver.py | 1 + .../trafilatura_web_scraper_driver.py | 13 +- .../duck_duck_go_web_search_driver.py | 4 +- .../web_search/google_web_search_driver.py | 29 +- .../audio/audio_transcription_engine.py | 7 +- .../engines/audio/text_to_speech_engine.py | 8 +- .../extraction/base_extraction_engine.py | 7 +- .../extraction/csv_extraction_engine.py | 55 +- .../extraction/json_extraction_engine.py | 61 +- .../image/base_image_generation_engine.py | 8 +- .../engines/image_query/image_query_engine.py | 8 +- griptape/engines/rag/modules/__init__.py | 6 +- .../engines/rag/modules/base_rag_module.py | 25 +- .../query/translate_query_rag_module.py | 32 + .../response/base_response_rag_module.py | 3 +- .../metadata_before_response_rag_module.py | 25 - 
.../response/prompt_response_rag_module.py | 34 +- .../rulesets_before_response_rag_module.py | 22 - .../text_chunks_response_rag_module.py | 8 +- .../text_loader_retrieval_rag_module.py | 5 +- .../vector_store_retrieval_rag_module.py | 5 +- griptape/engines/rag/rag_context.py | 14 +- griptape/engines/rag/stages/base_rag_stage.py | 11 +- .../engines/rag/stages/query_rag_stage.py | 6 +- .../engines/rag/stages/response_rag_stage.py | 29 +- .../engines/rag/stages/retrieval_rag_stage.py | 11 +- .../engines/summary/prompt_summary_engine.py | 10 +- griptape/events/__init__.py | 2 + .../event_bus.py} | 26 +- griptape/exceptions/dummy_exception.py | 2 +- griptape/loaders/audio_loader.py | 4 +- griptape/loaders/base_loader.py | 22 +- griptape/loaders/image_loader.py | 4 +- .../structure/base_conversation_memory.py | 9 +- .../structure/summary_conversation_memory.py | 21 +- .../task/storage/base_artifact_storage.py | 10 +- .../task/storage/blob_artifact_storage.py | 10 +- .../task/storage/text_artifact_storage.py | 55 +- griptape/memory/task/task_memory.py | 29 +- griptape/mixins/__init__.py | 6 +- griptape/mixins/activity_mixin.py | 38 +- griptape/mixins/futures_executor_mixin.py | 27 + griptape/mixins/singleton_mixin.py | 10 + griptape/schemas/base_schema.py | 2 +- griptape/structures/agent.py | 21 +- griptape/structures/pipeline.py | 3 + griptape/structures/structure.py | 151 +- griptape/structures/workflow.py | 42 +- griptape/tasks/__init__.py | 4 - griptape/tasks/actions_subtask.py | 39 +- griptape/tasks/audio_transcription_task.py | 22 +- griptape/tasks/base_audio_generation_task.py | 8 +- griptape/tasks/base_audio_input_task.py | 8 +- griptape/tasks/base_image_generation_task.py | 7 +- griptape/tasks/base_multi_text_input_task.py | 8 +- griptape/tasks/base_task.py | 78 +- griptape/tasks/base_text_input_task.py | 8 +- griptape/tasks/csv_extraction_task.py | 24 - griptape/tasks/extraction_task.py | 8 +- griptape/tasks/image_query_task.py | 17 +- .../tasks/inpainting_image_generation_task.py | 24 +- griptape/tasks/json_extraction_task.py | 24 - .../outpainting_image_generation_task.py | 25 +- .../tasks/prompt_image_generation_task.py | 26 +- griptape/tasks/prompt_task.py | 21 +- griptape/tasks/rag_task.py | 33 +- griptape/tasks/text_summary_task.py | 19 +- griptape/tasks/text_to_speech_task.py | 19 +- griptape/tasks/tool_task.py | 1 + griptape/tasks/toolkit_task.py | 2 + .../tasks/variation_image_generation_task.py | 24 +- .../engines/extraction/csv/system.j2 | 7 + .../templates/engines/extraction/csv/user.j2 | 4 + .../engines/extraction/csv_extraction.j2 | 11 - .../engines/extraction/json/system.j2 | 6 + .../{json_extraction.j2 => json/user.j2} | 9 +- .../rag/modules/query/translate/user.j2 | 3 + .../rag/modules/response/prompt/system.j2 | 8 +- .../tokenizers/amazon_bedrock_tokenizer.py | 4 +- griptape/tokenizers/simple_tokenizer.py | 4 +- griptape/tools/__init__.py | 128 +- .../__init__.py | 0 .../manifest.yml | 2 +- .../tool.py | 2 +- .../__init__.py | 0 .../{aws_iam_client => aws_iam}/manifest.yml | 2 +- .../tools/{aws_iam_client => aws_iam}/tool.py | 4 +- .../__init__.py | 0 .../{aws_s3_client => aws_s3}/manifest.yml | 2 +- .../tools/{aws_s3_client => aws_s3}/tool.py | 4 +- .../{base_aws_client.py => base_aws_tool.py} | 2 +- ...e_google_client.py => base_google_tool.py} | 2 +- ..._client.py => base_griptape_cloud_tool.py} | 2 +- ...lient.py => base_image_generation_tool.py} | 2 +- griptape/tools/base_tool.py | 34 +- griptape/tools/calculator/manifest.yml | 4 +- 
griptape/tools/calculator/tool.py | 2 +- griptape/tools/computer/manifest.yml | 4 +- griptape/tools/computer/tool.py | 2 +- griptape/tools/date_time/manifest.yml | 4 +- griptape/tools/date_time/tool.py | 2 +- .../__init__.py | 0 .../{email_client => email}/manifest.yml | 2 +- .../tools/{email_client => email}/tool.py | 2 +- .../__init__.py | 0 griptape/tools/extraction/manifest.yml | 5 + .../requirements.txt | 0 griptape/tools/extraction/tool.py | 60 + griptape/tools/file_manager/manifest.yml | 4 +- griptape/tools/file_manager/tool.py | 6 +- .../__init__.py | 0 .../manifest.yml | 0 .../requirements.txt | 0 .../{google_cal => google_calendar}/tool.py | 4 +- griptape/tools/google_docs/tool.py | 4 +- griptape/tools/google_drive/tool.py | 4 +- griptape/tools/google_gmail/manifest.yml | 2 +- griptape/tools/google_gmail/tool.py | 4 +- .../__init__.py | 0 .../manifest.yml | 2 +- .../tool.py | 6 +- .../{sql_client => image_query}/__init__.py | 0 .../manifest.yml | 2 +- .../tool.py | 4 +- .../__init__.py | 0 .../manifest.yml | 2 +- .../requirements.txt | 0 .../tool.py | 12 +- .../__init__.py | 0 .../manifest.yml | 2 +- .../tool.py | 2 +- .../__init__.py | 0 .../manifest.yml | 2 +- .../requirements.txt | 0 .../tool.py | 12 +- .../__init__.py | 0 .../manifest.yml | 2 +- .../requirements.txt | 0 .../tool.py | 8 +- .../__init__.py | 0 griptape/tools/prompt_summary/manifest.yml | 5 + .../requirements.txt | 0 griptape/tools/prompt_summary/tool.py | 55 + .../tools/query}/__init__.py | 0 griptape/tools/query/manifest.yml | 5 + .../requirements.txt | 0 griptape/tools/query/tool.py | 85 + griptape/tools/rag/__init__.py | 0 .../tools/{rag_client => rag}/manifest.yml | 2 +- griptape/tools/rag/requirements.txt | 0 griptape/tools/{rag_client => rag}/tool.py | 22 +- griptape/tools/rest_api/__init__.py | 0 .../manifest.yml | 2 +- .../{rest_api_client => rest_api}/tool.py | 10 +- griptape/tools/sql/__init__.py | 0 .../tools/{sql_client => sql}/manifest.yml | 2 +- griptape/tools/{sql_client => sql}/tool.py | 2 +- griptape/tools/structure_run/__init__.py | 0 .../manifest.yml | 2 +- .../tool.py | 10 +- .../tools/task_memory_client/manifest.yml | 5 - griptape/tools/task_memory_client/tool.py | 52 - griptape/tools/text_to_speech/__init__.py | 0 .../manifest.yml | 2 +- .../tool.py | 2 +- .../variation_image_generation/__init__.py | 0 .../manifest.yml | 2 +- .../requirements.txt | 0 .../tool.py | 12 +- griptape/tools/vector_store/__init__.py | 0 .../manifest.yml | 2 +- griptape/tools/vector_store/requirements.txt | 0 .../tool.py | 2 +- griptape/tools/web_scraper/manifest.yml | 4 +- griptape/tools/web_scraper/tool.py | 2 +- griptape/tools/web_search/manifest.yml | 6 +- griptape/tools/web_search/tool.py | 12 +- griptape/utils/__init__.py | 4 +- griptape/utils/chat.py | 20 +- griptape/utils/conversation.py | 23 +- griptape/utils/decorators.py | 24 +- griptape/utils/futures.py | 6 + griptape/utils/j2.py | 3 +- griptape/utils/load_artifact_from_memory.py | 11 +- griptape/utils/stream.py | 18 +- griptape/utils/structure_visualizer.py | 3 +- griptape/utils/token_counter.py | 6 +- mkdocs.yml | 61 +- poetry.lock | 2653 +++++++++-------- pyproject.toml | 20 +- .../test_astra_db_vector_store_driver.py | 148 + tests/integration/tasks/test_tool_task.py | 4 +- tests/integration/tasks/test_toolkit_task.py | 8 +- tests/integration/test_code_blocks.py | 51 +- ..._calculator.py => test_calculator_tool.py} | 4 +- ...e_manager.py => test_file_manager_tool.py} | 4 +- ...ocs_client.py => test_google_docs_tool.py} | 6 +- ...ve_client.py => 
test_google_drive_tool.py} | 6 +- tests/mocks/docker/fake_api.py | 8 +- tests/mocks/mock_drivers_config.py | 32 + tests/mocks/mock_embedding_driver.py | 5 +- tests/mocks/mock_failing_prompt_driver.py | 11 +- tests/mocks/mock_futures_executor.py | 4 + tests/mocks/mock_image_generation_driver.py | 2 + tests/mocks/mock_structure_config.py | 23 - tests/unit/artifacts/test_json_artifact.py | 29 + tests/unit/common/test_observable.py | 3 + tests/unit/common/test_prompt_stack.py | 6 + tests/unit/config/test_structure_config.py | 97 - tests/unit/configs/__init__.py | 0 tests/unit/configs/drivers/__init__.py | 0 .../test_amazon_bedrock_drivers_config.py} | 17 +- .../drivers/test_anthropic_drivers_config.py} | 10 +- .../test_azure_openai_drivers_config.py} | 21 +- .../drivers/test_cohere_drivers_config.py} | 8 +- .../configs/drivers/test_drivers_config.py | 70 + .../drivers/test_google_drivers_config.py} | 10 +- .../drivers/test_openai_driver_config.py} | 10 +- tests/unit/configs/logging/__init__.py | 0 .../logging/test_newline_logging_filter.py | 20 + .../logging/test_truncate_logging_filter.py | 20 + tests/unit/configs/test_defaults_config.py | 14 + tests/unit/conftest.py | 29 + .../test_base_audio_transcription_driver.py | 6 +- .../test_base_event_listener_driver.py | 2 - ...st_griptape_cloud_event_listener_driver.py | 4 - .../test_base_image_generation_driver.py | 9 +- .../test_base_image_query_driver.py | 4 +- ...est_dynamodb_conversation_memory_driver.py | 13 +- ...iptape_cloud_conversation_memory_driver.py | 91 + .../test_local_conversation_memory_driver.py | 10 +- ...est_open_telemetry_observability_driver.py | 3 +- .../test_amazon_bedrock_prompt_driver.py | 5 + .../prompt/test_anthropic_prompt_driver.py | 5 + .../drivers/prompt/test_base_prompt_driver.py | 34 +- .../prompt/test_google_prompt_driver.py | 41 +- ...est_hugging_face_pipeline_prompt_driver.py | 3 +- .../prompt/test_ollama_prompt_driver.py | 5 + .../prompt/test_openai_chat_prompt_driver.py | 5 + .../drivers/sql/test_snowflake_sql_driver.py | 16 +- .../test_local_structure_run_driver.py | 7 +- .../test_base_audio_transcription_driver.py | 4 +- .../test_astra_db_vector_store_driver.py | 136 + .../vector/test_local_vector_store_driver.py | 13 + .../vector/test_qdrant_vector_store_driver.py | 3 +- .../test_duck_duck_go_web_search_driver.py | 15 +- .../extraction/test_csv_extraction_engine.py | 5 +- .../extraction/test_json_extraction_engine.py | 12 +- tests/unit/engines/query/__init__.py | 0 .../query/test_translate_query_rag_module.py | 10 + ...est_footnote_prompt_response_rag_module.py | 5 +- ...est_metadata_before_response_rag_module.py | 21 - .../test_prompt_response_rag_module.py | 11 +- ...est_rulesets_before_response_rag_module.py | 10 - .../test_text_chunks_response_rag_module.py | 2 +- .../test_vector_store_retrieval_rag_module.py | 10 +- .../rag/modules/test_base_rag_nodule.py | 10 +- tests/unit/engines/rag/test_rag_engine.py | 18 +- .../summary/test_prompt_summary_engine.py | 6 +- tests/unit/events/test_event_bus.py | 50 + tests/unit/events/test_event_listener.py | 60 +- .../test_finish_actions_subtask_event.py | 3 +- tests/unit/events/test_finish_task_event.py | 3 +- .../test_start_actions_subtask_event.py | 3 +- tests/unit/events/test_start_task_event.py | 3 +- .../structure/test_conversation_memory.py | 22 +- .../test_summary_conversation_memory.py | 11 +- .../storage/test_blob_artifact_storage.py | 10 - .../storage/test_text_artifact_storage.py | 10 - tests/unit/memory/tool/test_task_memory.py | 10 - 
tests/unit/mixins/test_activity_mixin.py | 34 +- tests/unit/mixins/test_events_mixin.py | 59 - .../mixins/test_futures_executor_mixin.py | 10 + tests/unit/structures/test_agent.py | 40 +- tests/unit/structures/test_pipeline.py | 78 +- tests/unit/structures/test_workflow.py | 155 +- .../tasks/test_audio_transcription_task.py | 6 +- .../tasks/test_base_multi_text_input_task.py | 3 +- tests/unit/tasks/test_base_task.py | 84 +- tests/unit/tasks/test_base_text_input_task.py | 3 +- tests/unit/tasks/test_code_execution_task.py | 3 +- tests/unit/tasks/test_csv_extraction_task.py | 33 - tests/unit/tasks/test_extraction_task.py | 5 +- tests/unit/tasks/test_image_query_task.py | 9 +- .../test_inpainting_image_generation_task.py | 9 +- tests/unit/tasks/test_json_extraction_task.py | 38 - .../test_outpainting_image_generation_task.py | 9 +- .../test_prompt_image_generation_task.py | 11 +- tests/unit/tasks/test_prompt_task.py | 13 +- tests/unit/tasks/test_rag_task.py | 2 +- tests/unit/tasks/test_structure_run_task.py | 8 +- tests/unit/tasks/test_text_summary_task.py | 13 +- tests/unit/tasks/test_text_to_speech_task.py | 6 +- tests/unit/tasks/test_tool_task.py | 10 +- tests/unit/tasks/test_toolkit_task.py | 16 +- .../test_variation_image_generation_task.py | 9 +- .../{test_aws_iam.py => test_aws_iam_tool.py} | 12 +- .../{test_aws_s3.py => test_aws_s3_tool.py} | 25 +- tests/unit/tools/test_calculator.py | 4 +- tests/unit/tools/test_computer.py | 4 +- tests/unit/tools/test_date_time.py | 12 +- ...est_email_client.py => test_email_tool.py} | 12 +- tests/unit/tools/test_extraction_tool.py | 67 + tests/unit/tools/test_file_manager.py | 22 +- ...ocs_client.py => test_google_docs_tool.py} | 6 +- ...ve_client.py => test_google_drive_tool.py} | 18 +- ...il_client.py => test_google_gmail_tool.py} | 6 +- ...est_griptape_cloud_knowledge_base_tool.py} | 20 +- ... test_inpainting_image_generation_tool.py} | 14 +- ...her_client.py => test_openweather_tool.py} | 8 +- ... 
test_outpainting_image_variation_tool.py} | 14 +- ...y => test_prompt_image_generation_tool.py} | 12 +- tests/unit/tools/test_prompt_summary_tool.py | 29 + tests/unit/tools/test_query_tool.py | 31 + .../{test_rag_client.py => test_rag_tool.py} | 8 +- ...st_api_client.py => test_rest_api_tool.py} | 4 +- .../{test_sql_client.py => test_sql_tool.py} | 8 +- ...n_client.py => test_structure_run_tool.py} | 10 +- tests/unit/tools/test_task_memory_client.py | 29 - ..._client.py => test_text_to_speech_tool.py} | 12 +- ...n_client.py => test_transcription_tool.py} | 10 +- ...> test_variation_image_generation_tool.py} | 14 +- ...re_client.py => test_vector_store_tool.py} | 18 +- tests/unit/tools/test_web_scraper.py | 4 +- tests/unit/tools/test_web_search.py | 14 +- tests/unit/utils/test_chat.py | 27 +- tests/unit/utils/test_conversation.py | 10 +- tests/unit/utils/test_file_utils.py | 5 +- tests/unit/utils/test_futures.py | 17 +- tests/unit/utils/test_stream.py | 13 +- tests/unit/utils/test_structure_visualizer.py | 5 +- tests/utils/code_blocks.py | 83 - tests/utils/defaults.py | 9 +- tests/utils/structure_tester.py | 16 +- tests/utils/test_reference_utils.py | 3 +- 734 files changed, 11453 insertions(+), 9647 deletions(-) create mode 100644 docs/examples/query-webpage-astra-db.md create mode 100644 docs/examples/src/amazon_dynamodb_sessions_1.py create mode 100644 docs/examples/src/load_and_query_pinecone_1.py create mode 100644 docs/examples/src/load_query_and_chat_marqo_1.py create mode 100644 docs/examples/src/multi_agent_workflow_1.py create mode 100644 docs/examples/src/multiple_agent_shared_memory_1.py create mode 100644 docs/examples/src/query_webpage_1.py create mode 100644 docs/examples/src/query_webpage_astra_db_1.py create mode 100644 docs/examples/src/talk_to_a_pdf_1.py create mode 100644 docs/examples/src/talk_to_a_video_1.py create mode 100644 docs/examples/src/talk_to_a_webpage_1.py create mode 100644 docs/examples/src/talk_to_redshift_1.py rename {griptape/tools/audio_transcription_client => docs/griptape-framework/data/src}/__init__.py (100%) create mode 100644 docs/griptape-framework/data/src/chunkers_1.py create mode 100644 docs/griptape-framework/data/src/loaders_1.py create mode 100644 docs/griptape-framework/data/src/loaders_10.py create mode 100644 docs/griptape-framework/data/src/loaders_2.py create mode 100644 docs/griptape-framework/data/src/loaders_3.py create mode 100644 docs/griptape-framework/data/src/loaders_4.py create mode 100644 docs/griptape-framework/data/src/loaders_5.py create mode 100644 docs/griptape-framework/data/src/loaders_6.py create mode 100644 docs/griptape-framework/data/src/loaders_7.py create mode 100644 docs/griptape-framework/data/src/loaders_8.py create mode 100644 docs/griptape-framework/data/src/loaders_9.py create mode 100644 docs/griptape-framework/drivers/src/audio_transcription_drivers_1.py create mode 100644 docs/griptape-framework/drivers/src/conversation_memory_drivers_1.py create mode 100644 docs/griptape-framework/drivers/src/conversation_memory_drivers_2.py create mode 100644 docs/griptape-framework/drivers/src/conversation_memory_drivers_3.py create mode 100644 docs/griptape-framework/drivers/src/conversation_memory_drivers_griptape_cloud.py create mode 100644 docs/griptape-framework/drivers/src/embedding_drivers_1.py create mode 100644 docs/griptape-framework/drivers/src/embedding_drivers_10.py create mode 100644 docs/griptape-framework/drivers/src/embedding_drivers_2.py create mode 100644 
docs/griptape-framework/drivers/src/embedding_drivers_3.py create mode 100644 docs/griptape-framework/drivers/src/embedding_drivers_4.py create mode 100644 docs/griptape-framework/drivers/src/embedding_drivers_5.py create mode 100644 docs/griptape-framework/drivers/src/embedding_drivers_6.py create mode 100644 docs/griptape-framework/drivers/src/embedding_drivers_7.py create mode 100644 docs/griptape-framework/drivers/src/embedding_drivers_8.py create mode 100644 docs/griptape-framework/drivers/src/embedding_drivers_9.py create mode 100644 docs/griptape-framework/drivers/src/event_listener_drivers_1.py create mode 100644 docs/griptape-framework/drivers/src/event_listener_drivers_2.py create mode 100644 docs/griptape-framework/drivers/src/event_listener_drivers_3.py create mode 100644 docs/griptape-framework/drivers/src/event_listener_drivers_4.py create mode 100644 docs/griptape-framework/drivers/src/event_listener_drivers_5.py create mode 100644 docs/griptape-framework/drivers/src/event_listener_drivers_6.py create mode 100644 docs/griptape-framework/drivers/src/event_listener_drivers_7.py create mode 100644 docs/griptape-framework/drivers/src/image_generation_drivers_1.py create mode 100644 docs/griptape-framework/drivers/src/image_generation_drivers_2.py create mode 100644 docs/griptape-framework/drivers/src/image_generation_drivers_3.py create mode 100644 docs/griptape-framework/drivers/src/image_generation_drivers_4.py create mode 100644 docs/griptape-framework/drivers/src/image_generation_drivers_5.py create mode 100644 docs/griptape-framework/drivers/src/image_generation_drivers_6.py create mode 100644 docs/griptape-framework/drivers/src/image_generation_drivers_7.py create mode 100644 docs/griptape-framework/drivers/src/image_generation_drivers_8.py create mode 100644 docs/griptape-framework/drivers/src/image_generation_drivers_9.py create mode 100644 docs/griptape-framework/drivers/src/image_query_drivers_1.py create mode 100644 docs/griptape-framework/drivers/src/image_query_drivers_2.py create mode 100644 docs/griptape-framework/drivers/src/image_query_drivers_3.py create mode 100644 docs/griptape-framework/drivers/src/image_query_drivers_4.py create mode 100644 docs/griptape-framework/drivers/src/image_query_drivers_5.py create mode 100644 docs/griptape-framework/drivers/src/observability_drivers_1.py create mode 100644 docs/griptape-framework/drivers/src/observability_drivers_2.py create mode 100644 docs/griptape-framework/drivers/src/prompt_drivers_1.py create mode 100644 docs/griptape-framework/drivers/src/prompt_drivers_10.py create mode 100644 docs/griptape-framework/drivers/src/prompt_drivers_11.py create mode 100644 docs/griptape-framework/drivers/src/prompt_drivers_12.py create mode 100644 docs/griptape-framework/drivers/src/prompt_drivers_13.py create mode 100644 docs/griptape-framework/drivers/src/prompt_drivers_14.py create mode 100644 docs/griptape-framework/drivers/src/prompt_drivers_2.py create mode 100644 docs/griptape-framework/drivers/src/prompt_drivers_3.py create mode 100644 docs/griptape-framework/drivers/src/prompt_drivers_4.py create mode 100644 docs/griptape-framework/drivers/src/prompt_drivers_5.py create mode 100644 docs/griptape-framework/drivers/src/prompt_drivers_6.py create mode 100644 docs/griptape-framework/drivers/src/prompt_drivers_7.py create mode 100644 docs/griptape-framework/drivers/src/prompt_drivers_8.py create mode 100644 docs/griptape-framework/drivers/src/prompt_drivers_9.py create mode 100644 
docs/griptape-framework/drivers/src/sql_drivers_1.py create mode 100644 docs/griptape-framework/drivers/src/sql_drivers_2.py create mode 100644 docs/griptape-framework/drivers/src/sql_drivers_3.py create mode 100644 docs/griptape-framework/drivers/src/structure_run_drivers_1.py create mode 100644 docs/griptape-framework/drivers/src/structure_run_drivers_2.py create mode 100644 docs/griptape-framework/drivers/src/text_to_speech_drivers_1.py create mode 100644 docs/griptape-framework/drivers/src/text_to_speech_drivers_2.py create mode 100644 docs/griptape-framework/drivers/src/vector_store_drivers_1.py create mode 100644 docs/griptape-framework/drivers/src/vector_store_drivers_10.py create mode 100644 docs/griptape-framework/drivers/src/vector_store_drivers_11.py create mode 100644 docs/griptape-framework/drivers/src/vector_store_drivers_2.py create mode 100644 docs/griptape-framework/drivers/src/vector_store_drivers_3.py create mode 100644 docs/griptape-framework/drivers/src/vector_store_drivers_4.py create mode 100644 docs/griptape-framework/drivers/src/vector_store_drivers_5.py create mode 100644 docs/griptape-framework/drivers/src/vector_store_drivers_6.py create mode 100644 docs/griptape-framework/drivers/src/vector_store_drivers_7.py create mode 100644 docs/griptape-framework/drivers/src/vector_store_drivers_8.py create mode 100644 docs/griptape-framework/drivers/src/vector_store_drivers_9.py create mode 100644 docs/griptape-framework/drivers/src/web_scraper_drivers_1.py create mode 100644 docs/griptape-framework/drivers/src/web_scraper_drivers_2.py create mode 100644 docs/griptape-framework/drivers/src/web_scraper_drivers_3.py create mode 100644 docs/griptape-framework/drivers/src/web_scraper_drivers_4.py create mode 100644 docs/griptape-framework/drivers/src/web_search_drivers_1.py create mode 100644 docs/griptape-framework/drivers/src/web_search_drivers_2.py create mode 100644 docs/griptape-framework/drivers/src/web_search_drivers_3.py rename {griptape/tools/aws_iam_client => docs/griptape-framework/engines/src}/__init__.py (100%) create mode 100644 docs/griptape-framework/engines/src/audio_engines_1.py create mode 100644 docs/griptape-framework/engines/src/audio_engines_2.py create mode 100644 docs/griptape-framework/engines/src/extraction_engines_1.py create mode 100644 docs/griptape-framework/engines/src/extraction_engines_2.py create mode 100644 docs/griptape-framework/engines/src/image_generation_engines_1.py create mode 100644 docs/griptape-framework/engines/src/image_generation_engines_2.py create mode 100644 docs/griptape-framework/engines/src/image_generation_engines_3.py create mode 100644 docs/griptape-framework/engines/src/image_generation_engines_4.py create mode 100644 docs/griptape-framework/engines/src/image_generation_engines_5.py create mode 100644 docs/griptape-framework/engines/src/image_query_engines_1.py create mode 100644 docs/griptape-framework/engines/src/rag_engines_1.py create mode 100644 docs/griptape-framework/engines/src/summary_engines_1.py rename {griptape/tools/aws_s3_client => docs/griptape-framework/misc/src}/__init__.py (100%) create mode 100644 docs/griptape-framework/misc/src/events_1.py create mode 100644 docs/griptape-framework/misc/src/events_2.py create mode 100644 docs/griptape-framework/misc/src/events_3.py create mode 100644 docs/griptape-framework/misc/src/events_4.py create mode 100644 docs/griptape-framework/misc/src/events_5.py create mode 100644 docs/griptape-framework/misc/src/events_6.py create mode 100644 
docs/griptape-framework/misc/src/tokenizers_1.py create mode 100644 docs/griptape-framework/misc/src/tokenizers_2.py create mode 100644 docs/griptape-framework/misc/src/tokenizers_3.py create mode 100644 docs/griptape-framework/misc/src/tokenizers_4.py create mode 100644 docs/griptape-framework/misc/src/tokenizers_5.py create mode 100644 docs/griptape-framework/misc/src/tokenizers_6.py create mode 100644 docs/griptape-framework/misc/src/tokenizers_7.py rename {griptape/tools/email_client => docs/griptape-framework/src}/__init__.py (100%) create mode 100644 docs/griptape-framework/src/index_1.py create mode 100644 docs/griptape-framework/src/index_2.py create mode 100644 docs/griptape-framework/src/index_3.py create mode 100644 docs/griptape-framework/src/index_4.py delete mode 100644 docs/griptape-framework/structures/config.md create mode 100644 docs/griptape-framework/structures/configs.md rename {griptape/tools/google_cal => docs/griptape-framework/structures/src}/__init__.py (100%) create mode 100644 docs/griptape-framework/structures/src/agents_1.py create mode 100644 docs/griptape-framework/structures/src/agents_2.py create mode 100644 docs/griptape-framework/structures/src/conversation_memory_1.py create mode 100644 docs/griptape-framework/structures/src/conversation_memory_2.py create mode 100644 docs/griptape-framework/structures/src/conversation_memory_3.py create mode 100644 docs/griptape-framework/structures/src/conversation_memory_4.py create mode 100644 docs/griptape-framework/structures/src/conversation_memory_5.py create mode 100644 docs/griptape-framework/structures/src/drivers_config_1.py create mode 100644 docs/griptape-framework/structures/src/drivers_config_2.py create mode 100644 docs/griptape-framework/structures/src/drivers_config_3.py create mode 100644 docs/griptape-framework/structures/src/drivers_config_4.py create mode 100644 docs/griptape-framework/structures/src/drivers_config_5.py create mode 100644 docs/griptape-framework/structures/src/drivers_config_6.py create mode 100644 docs/griptape-framework/structures/src/drivers_config_7.py create mode 100644 docs/griptape-framework/structures/src/drivers_config_8.py create mode 100644 docs/griptape-framework/structures/src/logging_config.py create mode 100644 docs/griptape-framework/structures/src/observability_1.py create mode 100644 docs/griptape-framework/structures/src/observability_2.py create mode 100644 docs/griptape-framework/structures/src/pipelines_1.py create mode 100644 docs/griptape-framework/structures/src/rulesets_1.py create mode 100644 docs/griptape-framework/structures/src/rulesets_2.py create mode 100644 docs/griptape-framework/structures/src/rulesets_3.py create mode 100644 docs/griptape-framework/structures/src/rulesets_4.py create mode 100644 docs/griptape-framework/structures/src/task_memory_1.py create mode 100644 docs/griptape-framework/structures/src/task_memory_2.py create mode 100644 docs/griptape-framework/structures/src/task_memory_3.py create mode 100644 docs/griptape-framework/structures/src/task_memory_4.py create mode 100644 docs/griptape-framework/structures/src/task_memory_5.py create mode 100644 docs/griptape-framework/structures/src/task_memory_6.py create mode 100644 docs/griptape-framework/structures/src/task_memory_7.py create mode 100644 docs/griptape-framework/structures/src/task_memory_8.py create mode 100644 docs/griptape-framework/structures/src/task_memory_9.py create mode 100644 docs/griptape-framework/structures/src/tasks_1.py create mode 100644 
docs/griptape-framework/structures/src/tasks_10.py create mode 100644 docs/griptape-framework/structures/src/tasks_11.py create mode 100644 docs/griptape-framework/structures/src/tasks_12.py create mode 100644 docs/griptape-framework/structures/src/tasks_13.py create mode 100644 docs/griptape-framework/structures/src/tasks_14.py create mode 100644 docs/griptape-framework/structures/src/tasks_15.py create mode 100644 docs/griptape-framework/structures/src/tasks_16.py create mode 100644 docs/griptape-framework/structures/src/tasks_17.py create mode 100644 docs/griptape-framework/structures/src/tasks_18.py create mode 100644 docs/griptape-framework/structures/src/tasks_2.py create mode 100644 docs/griptape-framework/structures/src/tasks_3.py create mode 100644 docs/griptape-framework/structures/src/tasks_4.py create mode 100644 docs/griptape-framework/structures/src/tasks_5.py create mode 100644 docs/griptape-framework/structures/src/tasks_6.py create mode 100644 docs/griptape-framework/structures/src/tasks_7.py create mode 100644 docs/griptape-framework/structures/src/tasks_8.py create mode 100644 docs/griptape-framework/structures/src/tasks_9.py create mode 100644 docs/griptape-framework/structures/src/workflows_1.py create mode 100644 docs/griptape-framework/structures/src/workflows_2.py create mode 100644 docs/griptape-framework/structures/src/workflows_3.py create mode 100644 docs/griptape-framework/structures/src/workflows_4.py create mode 100644 docs/griptape-framework/structures/src/workflows_5.py create mode 100644 docs/griptape-framework/structures/src/workflows_6.py create mode 100644 docs/griptape-framework/structures/src/workflows_7.py create mode 100644 docs/griptape-framework/structures/src/workflows_8.py create mode 100644 docs/griptape-framework/structures/src/workflows_9.py rename {griptape/tools/griptape_cloud_knowledge_base_client => docs/griptape-framework/tools/src}/__init__.py (100%) create mode 100644 docs/griptape-framework/tools/src/index_1.py create mode 100644 docs/griptape-tools/custom-tools/src/index_1.py create mode 100644 docs/griptape-tools/custom-tools/src/index_2.py delete mode 100644 docs/griptape-tools/official-tools/audio-transcription-client.md create mode 100644 docs/griptape-tools/official-tools/audio-transcription-tool.md delete mode 100644 docs/griptape-tools/official-tools/aws-iam-client.md create mode 100644 docs/griptape-tools/official-tools/aws-iam-tool.md delete mode 100644 docs/griptape-tools/official-tools/aws-s3-client.md create mode 100644 docs/griptape-tools/official-tools/aws-s3-tool.md rename docs/griptape-tools/official-tools/{calculator.md => calculator-tool.md} (71%) create mode 100644 docs/griptape-tools/official-tools/computer-tool.md delete mode 100644 docs/griptape-tools/official-tools/computer.md rename docs/griptape-tools/official-tools/{date-time.md => date-time-tool.md} (83%) delete mode 100644 docs/griptape-tools/official-tools/email-client.md create mode 100644 docs/griptape-tools/official-tools/email-tool.md create mode 100644 docs/griptape-tools/official-tools/extraction-tool.md rename docs/griptape-tools/official-tools/{file-manager.md => file-manager-tool.md} (70%) create mode 100644 docs/griptape-tools/official-tools/google-calendar-tool.md rename docs/griptape-tools/official-tools/{google-docs-client.md => google-docs-tool.md} (54%) rename docs/griptape-tools/official-tools/{google-drive-client.md => google-drive-tool.md} (50%) rename docs/griptape-tools/official-tools/{google-gmail-client.md => google-gmail-tool.md} 
(52%) delete mode 100644 docs/griptape-tools/official-tools/griptape-cloud-knowledge-base-client.md create mode 100644 docs/griptape-tools/official-tools/griptape-cloud-knowledge-base-tool.md create mode 100644 docs/griptape-tools/official-tools/image-query-tool.md delete mode 100644 docs/griptape-tools/official-tools/inpainting-image-generation-client.md create mode 100644 docs/griptape-tools/official-tools/inpainting-image-generation-tool.md delete mode 100644 docs/griptape-tools/official-tools/openweather-client.md create mode 100644 docs/griptape-tools/official-tools/openweather-tool.md delete mode 100644 docs/griptape-tools/official-tools/outpainting-image-generation-client.md create mode 100644 docs/griptape-tools/official-tools/outpainting-image-generation-tool.md create mode 100644 docs/griptape-tools/official-tools/prompt-image-generation-tool.md create mode 100644 docs/griptape-tools/official-tools/prompt-summary-tool.md create mode 100644 docs/griptape-tools/official-tools/query-tool.md delete mode 100644 docs/griptape-tools/official-tools/rag-client.md create mode 100644 docs/griptape-tools/official-tools/rag-tool.md create mode 100644 docs/griptape-tools/official-tools/rest-api-tool.md delete mode 100644 docs/griptape-tools/official-tools/sql-client.md create mode 100644 docs/griptape-tools/official-tools/sql-tool.md create mode 100644 docs/griptape-tools/official-tools/src/audio_transcription_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/aws_iam_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/aws_s3_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/calculator_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/computer_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/date_time_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/email_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/extraction_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/file_manager_tool_1.py rename docs/griptape-tools/official-tools/{google-cal-client.md => src/google_calendar_tool_1.py} (66%) create mode 100644 docs/griptape-tools/official-tools/src/google_docs_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/google_drive_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/google_gmail_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/griptape_cloud_knowledge_base_tool_1.py rename docs/griptape-tools/official-tools/{image-query-client.md => src/image_query_tool_1.py} (59%) create mode 100644 docs/griptape-tools/official-tools/src/inpainting_image_generation_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/openweather_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/outpainting_image_generation_tool_1.py rename docs/griptape-tools/official-tools/{prompt-image-generation-client.md => src/prompt_image_generation_tool_1.py} (73%) create mode 100644 docs/griptape-tools/official-tools/src/prompt_summary_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/query_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/rag_tool_1.py rename docs/griptape-tools/official-tools/{rest-api-client.md => src/rest_api_tool_1.py} (87%) create mode 100644 docs/griptape-tools/official-tools/src/sql_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/structure_run_tool_1.py create mode 100644 
docs/griptape-tools/official-tools/src/text_to_speech_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/variation_image_generation_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/variation_image_generation_tool_2.py create mode 100644 docs/griptape-tools/official-tools/src/vector_store_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/web_scraper_1.py create mode 100644 docs/griptape-tools/official-tools/src/web_scraper_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/web_search_tool_1.py create mode 100644 docs/griptape-tools/official-tools/src/web_search_tool_2.py rename docs/griptape-tools/official-tools/{structure-run-client.md => structure-run-tool.md} (74%) delete mode 100644 docs/griptape-tools/official-tools/task-memory-client.md delete mode 100644 docs/griptape-tools/official-tools/text-to-speech-client.md create mode 100644 docs/griptape-tools/official-tools/text-to-speech-tool.md delete mode 100644 docs/griptape-tools/official-tools/variation-image-generation-client.md create mode 100644 docs/griptape-tools/official-tools/variation-image-generation-tool.md delete mode 100644 docs/griptape-tools/official-tools/vector-store-client.md create mode 100644 docs/griptape-tools/official-tools/vector-store-tool.md create mode 100644 docs/griptape-tools/official-tools/web-scraper-tool.md delete mode 100644 docs/griptape-tools/official-tools/web-scraper.md rename docs/griptape-tools/official-tools/{web-search.md => web-search-tool.md} (91%) create mode 100644 docs/griptape-tools/src/index_1.py create mode 100644 griptape/artifacts/json_artifact.py delete mode 100644 griptape/config/__init__.py delete mode 100644 griptape/config/amazon_bedrock_structure_config.py delete mode 100644 griptape/config/anthropic_structure_config.py delete mode 100644 griptape/config/azure_openai_structure_config.py delete mode 100644 griptape/config/base_structure_config.py delete mode 100644 griptape/config/cohere_structure_config.py delete mode 100644 griptape/config/google_structure_config.py delete mode 100644 griptape/config/openai_structure_config.py delete mode 100644 griptape/config/structure_config.py create mode 100644 griptape/configs/__init__.py rename griptape/{config => configs}/base_config.py (73%) create mode 100644 griptape/configs/defaults_config.py create mode 100644 griptape/configs/drivers/__init__.py create mode 100644 griptape/configs/drivers/amazon_bedrock_drivers_config.py create mode 100644 griptape/configs/drivers/anthropic_drivers_config.py create mode 100644 griptape/configs/drivers/azure_openai_drivers_config.py create mode 100644 griptape/configs/drivers/base_drivers_config.py create mode 100644 griptape/configs/drivers/cohere_drivers_config.py create mode 100644 griptape/configs/drivers/drivers_config.py create mode 100644 griptape/configs/drivers/google_drivers_config.py create mode 100644 griptape/configs/drivers/openai_drivers_config.py create mode 100644 griptape/configs/logging/__init__.py create mode 100644 griptape/configs/logging/logging_config.py create mode 100644 griptape/configs/logging/newline_logging_filter.py create mode 100644 griptape/configs/logging/truncate_logging_filter.py create mode 100644 griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.py create mode 100644 griptape/drivers/vector/astradb_vector_store_driver.py create mode 100644 griptape/engines/rag/modules/query/translate_query_rag_module.py delete mode 100644 
griptape/engines/rag/modules/response/metadata_before_response_rag_module.py delete mode 100644 griptape/engines/rag/modules/response/rulesets_before_response_rag_module.py rename griptape/{mixins/event_publisher_mixin.py => events/event_bus.py} (56%) create mode 100644 griptape/mixins/futures_executor_mixin.py create mode 100644 griptape/mixins/singleton_mixin.py delete mode 100644 griptape/tasks/csv_extraction_task.py delete mode 100644 griptape/tasks/json_extraction_task.py create mode 100644 griptape/templates/engines/extraction/csv/system.j2 create mode 100644 griptape/templates/engines/extraction/csv/user.j2 delete mode 100644 griptape/templates/engines/extraction/csv_extraction.j2 create mode 100644 griptape/templates/engines/extraction/json/system.j2 rename griptape/templates/engines/extraction/{json_extraction.j2 => json/user.j2} (56%) create mode 100644 griptape/templates/engines/rag/modules/query/translate/user.j2 rename griptape/tools/{image_query_client => audio_transcription}/__init__.py (100%) rename griptape/tools/{audio_transcription_client => audio_transcription}/manifest.yml (84%) rename griptape/tools/{audio_transcription_client => audio_transcription}/tool.py (98%) rename griptape/tools/{inpainting_image_generation_client => aws_iam}/__init__.py (100%) rename griptape/tools/{aws_iam_client => aws_iam}/manifest.yml (86%) rename griptape/tools/{aws_iam_client => aws_iam}/tool.py (97%) rename griptape/tools/{openweather_client => aws_s3}/__init__.py (100%) rename griptape/tools/{aws_s3_client => aws_s3}/manifest.yml (86%) rename griptape/tools/{aws_s3_client => aws_s3}/tool.py (99%) rename griptape/tools/{base_aws_client.py => base_aws_tool.py} (95%) rename griptape/tools/{base_google_client.py => base_google_tool.py} (98%) rename griptape/tools/{base_griptape_cloud_client.py => base_griptape_cloud_tool.py} (93%) rename griptape/tools/{base_image_generation_client.py => base_image_generation_tool.py} (88%) rename griptape/tools/{outpainting_image_generation_client => email}/__init__.py (100%) rename griptape/tools/{email_client => email}/manifest.yml (87%) rename griptape/tools/{email_client => email}/tool.py (99%) rename griptape/tools/{prompt_image_generation_client => extraction}/__init__.py (100%) create mode 100644 griptape/tools/extraction/manifest.yml rename griptape/tools/{inpainting_image_generation_client => extraction}/requirements.txt (100%) create mode 100644 griptape/tools/extraction/tool.py rename griptape/tools/{rag_client => google_calendar}/__init__.py (100%) rename griptape/tools/{google_cal => google_calendar}/manifest.yml (100%) rename griptape/tools/{google_cal => google_calendar}/requirements.txt (100%) rename griptape/tools/{google_cal => google_calendar}/tool.py (98%) rename griptape/tools/{rest_api_client => griptape_cloud_knowledge_base}/__init__.py (100%) rename griptape/tools/{griptape_cloud_knowledge_base_client => griptape_cloud_knowledge_base}/manifest.yml (78%) rename griptape/tools/{griptape_cloud_knowledge_base_client => griptape_cloud_knowledge_base}/tool.py (91%) rename griptape/tools/{sql_client => image_query}/__init__.py (100%) rename griptape/tools/{image_query_client => image_query}/manifest.yml (86%) rename griptape/tools/{image_query_client => image_query}/tool.py (97%) rename griptape/tools/{structure_run_client => inpainting_image_generation}/__init__.py (100%) rename griptape/tools/{inpainting_image_generation_client => inpainting_image_generation}/manifest.yml (79%) rename griptape/tools/{outpainting_image_generation_client 
=> inpainting_image_generation}/requirements.txt (100%) rename griptape/tools/{inpainting_image_generation_client => inpainting_image_generation}/tool.py (93%) rename griptape/tools/{task_memory_client => openweather}/__init__.py (100%) rename griptape/tools/{openweather_client => openweather}/manifest.yml (86%) rename griptape/tools/{openweather_client => openweather}/tool.py (99%) rename griptape/tools/{text_to_speech_client => outpainting_image_generation}/__init__.py (100%) rename griptape/tools/{outpainting_image_generation_client => outpainting_image_generation}/manifest.yml (79%) rename griptape/tools/{prompt_image_generation_client => outpainting_image_generation}/requirements.txt (100%) rename griptape/tools/{outpainting_image_generation_client => outpainting_image_generation}/tool.py (93%) rename griptape/tools/{variation_image_generation_client => prompt_image_generation}/__init__.py (100%) rename griptape/tools/{prompt_image_generation_client => prompt_image_generation}/manifest.yml (80%) rename griptape/tools/{rag_client => prompt_image_generation}/requirements.txt (100%) rename griptape/tools/{prompt_image_generation_client => prompt_image_generation}/tool.py (87%) rename griptape/tools/{vector_store_client => prompt_summary}/__init__.py (100%) create mode 100644 griptape/tools/prompt_summary/manifest.yml rename griptape/tools/{variation_image_generation_client => prompt_summary}/requirements.txt (100%) create mode 100644 griptape/tools/prompt_summary/tool.py rename {tests/unit/config => griptape/tools/query}/__init__.py (100%) create mode 100644 griptape/tools/query/manifest.yml rename griptape/tools/{vector_store_client => query}/requirements.txt (100%) create mode 100644 griptape/tools/query/tool.py create mode 100644 griptape/tools/rag/__init__.py rename griptape/tools/{rag_client => rag}/manifest.yml (88%) create mode 100644 griptape/tools/rag/requirements.txt rename griptape/tools/{rag_client => rag}/tool.py (64%) create mode 100644 griptape/tools/rest_api/__init__.py rename griptape/tools/{rest_api_client => rest_api}/manifest.yml (87%) rename griptape/tools/{rest_api_client => rest_api}/tool.py (97%) create mode 100644 griptape/tools/sql/__init__.py rename griptape/tools/{sql_client => sql}/manifest.yml (88%) rename griptape/tools/{sql_client => sql}/tool.py (98%) create mode 100644 griptape/tools/structure_run/__init__.py rename griptape/tools/{structure_run_client => structure_run}/manifest.yml (83%) rename griptape/tools/{structure_run_client => structure_run}/tool.py (76%) delete mode 100644 griptape/tools/task_memory_client/manifest.yml delete mode 100644 griptape/tools/task_memory_client/tool.py create mode 100644 griptape/tools/text_to_speech/__init__.py rename griptape/tools/{text_to_speech_client => text_to_speech}/manifest.yml (83%) rename griptape/tools/{text_to_speech_client => text_to_speech}/tool.py (95%) create mode 100644 griptape/tools/variation_image_generation/__init__.py rename griptape/tools/{variation_image_generation_client => variation_image_generation}/manifest.yml (79%) create mode 100644 griptape/tools/variation_image_generation/requirements.txt rename griptape/tools/{variation_image_generation_client => variation_image_generation}/tool.py (91%) create mode 100644 griptape/tools/vector_store/__init__.py rename griptape/tools/{vector_store_client => vector_store}/manifest.yml (85%) create mode 100644 griptape/tools/vector_store/requirements.txt rename griptape/tools/{vector_store_client => vector_store}/tool.py (98%) create mode 100644 
tests/integration/drivers/vector/test_astra_db_vector_store_driver.py rename tests/integration/tools/{test_calculator.py => test_calculator_tool.py} (73%) rename tests/integration/tools/{test_file_manager.py => test_file_manager_tool.py} (79%) rename tests/integration/tools/{test_google_docs_client.py => test_google_docs_tool.py} (95%) rename tests/integration/tools/{test_google_drive_client.py => test_google_drive_tool.py} (94%) create mode 100644 tests/mocks/mock_drivers_config.py create mode 100644 tests/mocks/mock_futures_executor.py delete mode 100644 tests/mocks/mock_structure_config.py create mode 100644 tests/unit/artifacts/test_json_artifact.py delete mode 100644 tests/unit/config/test_structure_config.py create mode 100644 tests/unit/configs/__init__.py create mode 100644 tests/unit/configs/drivers/__init__.py rename tests/unit/{config/test_amazon_bedrock_structure_config.py => configs/drivers/test_amazon_bedrock_drivers_config.py} (89%) rename tests/unit/{config/test_anthropic_structure_config.py => configs/drivers/test_anthropic_drivers_config.py} (85%) rename tests/unit/{config/test_azure_openai_structure_config.py => configs/drivers/test_azure_openai_drivers_config.py} (80%) rename tests/unit/{config/test_cohere_structure_config.py => configs/drivers/test_cohere_drivers_config.py} (87%) create mode 100644 tests/unit/configs/drivers/test_drivers_config.py rename tests/unit/{config/test_google_structure_config.py => configs/drivers/test_google_drivers_config.py} (86%) rename tests/unit/{config/test_openai_structure_config.py => configs/drivers/test_openai_driver_config.py} (91%) create mode 100644 tests/unit/configs/logging/__init__.py create mode 100644 tests/unit/configs/logging/test_newline_logging_filter.py create mode 100644 tests/unit/configs/logging/test_truncate_logging_filter.py create mode 100644 tests/unit/configs/test_defaults_config.py create mode 100644 tests/unit/conftest.py create mode 100644 tests/unit/drivers/memory/conversation/test_griptape_cloud_conversation_memory_driver.py create mode 100644 tests/unit/drivers/vector/test_astra_db_vector_store_driver.py create mode 100644 tests/unit/engines/query/__init__.py create mode 100644 tests/unit/engines/query/test_translate_query_rag_module.py delete mode 100644 tests/unit/engines/rag/modules/generation/test_metadata_before_response_rag_module.py delete mode 100644 tests/unit/engines/rag/modules/generation/test_rulesets_before_response_rag_module.py create mode 100644 tests/unit/events/test_event_bus.py delete mode 100644 tests/unit/mixins/test_events_mixin.py create mode 100644 tests/unit/mixins/test_futures_executor_mixin.py delete mode 100644 tests/unit/tasks/test_csv_extraction_task.py delete mode 100644 tests/unit/tasks/test_json_extraction_task.py rename tests/unit/tools/{test_aws_iam.py => test_aws_iam_tool.py} (56%) rename tests/unit/tools/{test_aws_s3.py => test_aws_s3_tool.py} (58%) rename tests/unit/tools/{test_email_client.py => test_email_tool.py} (93%) create mode 100644 tests/unit/tools/test_extraction_tool.py rename tests/unit/tools/{test_google_docs_client.py => test_google_docs_tool.py} (84%) rename tests/unit/tools/{test_google_drive_client.py => test_google_drive_tool.py} (68%) rename tests/unit/tools/{test_google_gmail_client.py => test_google_gmail_tool.py} (61%) rename tests/unit/tools/{test_griptape_cloud_knowledge_base_client.py => test_griptape_cloud_knowledge_base_tool.py} (83%) rename tests/unit/tools/{test_inpainting_image_generation_client.py => 
test_inpainting_image_generation_tool.py} (87%) rename tests/unit/tools/{test_openweather_client.py => test_openweather_tool.py} (92%) rename tests/unit/tools/{test_outpainting_image_variation_client.py => test_outpainting_image_variation_tool.py} (87%) rename tests/unit/tools/{test_prompt_image_generation_client.py => test_prompt_image_generation_tool.py} (77%) create mode 100644 tests/unit/tools/test_prompt_summary_tool.py create mode 100644 tests/unit/tools/test_query_tool.py rename tests/unit/tools/{test_rag_client.py => test_rag_tool.py} (57%) rename tests/unit/tools/{test_rest_api_client.py => test_rest_api_tool.py} (92%) rename tests/unit/tools/{test_sql_client.py => test_sql_tool.py} (87%) rename tests/unit/tools/{test_structure_run_client.py => test_structure_run_tool.py} (66%) delete mode 100644 tests/unit/tools/test_task_memory_client.py rename tests/unit/tools/{test_text_to_speech_client.py => test_text_to_speech_tool.py} (74%) rename tests/unit/tools/{test_transcription_client.py => test_transcription_tool.py} (83%) rename tests/unit/tools/{test_variation_image_generation_client.py => test_variation_image_generation_tool.py} (88%) rename tests/unit/tools/{test_vector_store_client.py => test_vector_store_tool.py} (68%) delete mode 100644 tests/utils/code_blocks.py diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 1e185a5a0..9813b6aba 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -13,11 +13,7 @@ assignees: '' A clear and concise description of what the bug is. **To Reproduce** -Steps to reproduce the behavior: -1. Go to '...' -2. Click on '....' -3. Scroll down to '....' -4. See error +A minimal, reproducible code example. **Expected behavior** A clear and concise description of what you expected to happen. 
diff --git a/.github/actions/init-bare-environment/action.yml b/.github/actions/init-bare-environment/action.yml index 8cdee6027..01c4f2eae 100644 --- a/.github/actions/init-bare-environment/action.yml +++ b/.github/actions/init-bare-environment/action.yml @@ -27,7 +27,7 @@ runs: - name: Install dependencies if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true' - run: poetry install --no-interaction --no-root --with test --with dev + run: poetry install --no-interaction --with test --with dev shell: bash - name: Activate venv diff --git a/.github/actions/init-environment/action.yml b/.github/actions/init-environment/action.yml index 0d9ad94fc..4b849dfe6 100644 --- a/.github/actions/init-environment/action.yml +++ b/.github/actions/init-environment/action.yml @@ -27,7 +27,7 @@ runs: - name: Install dependencies if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true' - run: poetry install --no-interaction --no-root --with test --with dev --all-extras + run: poetry install --no-interaction --with test --with dev --with docs --all-extras shell: bash - name: Activate venv diff --git a/.github/workflows/docs-integration-tests.yml b/.github/workflows/docs-integration-tests.yml index ce30f83bb..33ebdd562 100644 --- a/.github/workflows/docs-integration-tests.yml +++ b/.github/workflows/docs-integration-tests.yml @@ -1,12 +1,8 @@ name: Docs Integration Tests on: - pull_request_review: - types: [submitted] push: - branches: - - main - - dev + branches: [ "main", "dev" ] concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -14,7 +10,6 @@ concurrency: jobs: test: - if: github.event.review.state == 'APPROVED' || github.event_name == 'push' runs-on: ubuntu-latest strategy: fail-fast: false @@ -124,6 +119,8 @@ jobs: ZENROWS_API_KEY: ${{ secrets.INTEG_ZENROWS_API_KEY }} QDRANT_CLUSTER_ENDPOINT: ${{ secrets.INTEG_QDRANT_CLUSTER_ENDPOINT }} QDRANT_CLUSTER_API_KEY: ${{ secrets.INTEG_QDRANT_CLUSTER_API_KEY }} + ASTRA_DB_API_ENDPOINT: ${{ secrets.INTEG_ASTRA_DB_API_ENDPOINT }} + ASTRA_DB_APPLICATION_TOKEN: ${{ secrets.INTEG_ASTRA_DB_APPLICATION_TOKEN }} services: postgres: image: ankane/pgvector:v0.5.0 @@ -143,22 +140,7 @@ jobs: uses: actions/checkout@v3 - name: Init environment uses: ./.github/actions/init-environment - - name: Get changed files - id: changed-files - uses: tj-actions/changed-files@v44 - if: github.event_name == 'pull_request_review' - with: - files: | - **.md - - name: List all changed files - env: - ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }} - run: | - for file in ${ALL_CHANGED_FILES}; do - echo "$file was changed" - done - name: Run integration tests - if: steps.changed-files.outputs.any_changed == 'true' || github.event_name == 'push' run: make test/integration env: DOCS_ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files || '' }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 33ccbc3d5..21f72fe50 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,66 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +### Added +- `AstraDbVectorStoreDriver` to support DataStax Astra DB as a vector store. +- Ability to set custom schema properties on Tool Activities via `extra_schema_properties`. +- Parameter `structure` to `BaseTask`. +- Method `try_find_task` to `Structure`. +- `TranslateQueryRagModule` `RagEngine` module for translating input queries. +- Global event bus, `griptape.events.EventBus`, for publishing and subscribing to events. 
+- Global object, `griptape.configs.Defaults`, for setting default values throughout the framework.
+- Unique name generation for all `RagEngine` modules.
+- `ExtractionTool` for having the LLM extract structured data from text.
+- `PromptSummaryTool` for having the LLM summarize text.
+- `QueryTool` for having the LLM query text.
+- Support for bitshift composition in `BaseTask` for adding parent/child tasks.
+- `JsonArtifact` for handling de/serialization of values.
+- `Chat.logger_level` for controlling the logger level that the `Chat` utility sets.
+- `FuturesExecutorMixin` to DRY up and optimize concurrent code across multiple classes.
+- `utils.execute_futures_list_dict` for executing a dict of lists of futures.
+- `GriptapeCloudConversationMemoryDriver` to store conversation history in Griptape Cloud.
+- `griptape.utils.decorators.lazy_property` for creating lazy properties.
+
+### Changed
+- **BREAKING**: Removed all uses of `EventPublisherMixin` in favor of `EventBus`.
+- **BREAKING**: Removed `EventPublisherMixin`.
+- **BREAKING**: Removed `Pipeline.prompt_driver` and `Workflow.prompt_driver`. Set this via `griptape.configs.Defaults.drivers.prompt_driver` instead. `Agent.prompt_driver` has not been removed.
+- **BREAKING**: Removed `Pipeline.stream` and `Workflow.stream`. Set this via `griptape.configs.Defaults.drivers.prompt_driver.stream` instead. `Agent.stream` has not been removed.
+- **BREAKING**: Removed `Structure.embedding_driver`; set this via `griptape.configs.Defaults.drivers.embedding_driver` instead.
+- **BREAKING**: Removed `Structure.custom_logger` and `Structure.logger_level`; set these via `logging.getLogger(griptape.configs.Defaults.logger_name)` instead.
+- **BREAKING**: Removed `BaseStructureConfig.merge_config`.
+- **BREAKING**: Renamed `StructureConfig` to `DriversConfig`, moved it to `griptape.configs.drivers`, and renamed fields accordingly.
+- **BREAKING**: `RagContext.output` was changed to `RagContext.outputs` to support multiple outputs. All relevant RAG modules were adjusted accordingly.
+- **BREAKING**: Removed before and after response modules from `ResponseRagStage`.
+- **BREAKING**: Moved ruleset and metadata ingestion from standalone modules to `PromptResponseRagModule`.
+- **BREAKING**: Dropped `Client` from all Tool names for better naming consistency.
+- **BREAKING**: Dropped `_client` suffix from all Tool packages.
+- **BREAKING**: Added `Tool` suffix to all Tool names for better naming consistency.
+- **BREAKING**: Removed `TextArtifactStorage.query` and `TextArtifactStorage.summarize`.
+- **BREAKING**: Removed `TextArtifactStorage.rag_engine` and `TextArtifactStorage.retrieval_rag_module_name`.
+- **BREAKING**: Removed `TextArtifactStorage.summary_engine`, `TextArtifactStorage.csv_extraction_engine`, and `TextArtifactStorage.json_extraction_engine`.
+- **BREAKING**: Removed `TaskMemory.summarize_namespace` and `TaskMemory.query_namespace`.
+- **BREAKING**: Removed `Structure.rag_engine`.
+- **BREAKING**: Split `JsonExtractionEngine.template_generator` into `JsonExtractionEngine.system_template_generator` and `JsonExtractionEngine.user_template_generator`.
+- **BREAKING**: Split `CsvExtractionEngine.template_generator` into `CsvExtractionEngine.system_template_generator` and `CsvExtractionEngine.user_template_generator`.
+- **BREAKING**: Changed `JsonExtractionEngine.template_schema` from a `run` argument to a class attribute.
+- **BREAKING**: Changed `CsvExtractionEngine.column_names` from a `run` argument to a class attribute.
+- **BREAKING**: Removed `JsonExtractionTask` and `CsvExtractionTask`; use `ExtractionTask` instead.
+- **BREAKING**: Removed `TaskMemoryClient`; use `QueryTool`, `ExtractionTool`, or `PromptSummaryTool` instead.
+- **BREAKING**: `BaseTask.add_parent/child` now take a `BaseTask` instead of `str | BaseTask`.
+- Engines that previously required Drivers now pull from `griptape.configs.Defaults.drivers_config` by default.
+- `BaseTask.add_parent/child` will now call `self.structure.add_task` if possible.
+- `BaseTask.add_parent/child` now returns `self`, allowing for chaining.
+- `Chat` now sets the `griptape` logger level to `logging.ERROR`, suppressing all logs except for errors.
+
+### Fixed
+- `JsonExtractionEngine` failing to parse JSON when the LLM outputs more than just the JSON.
+- Exception when adding `ErrorArtifact`s to the Prompt Stack.
+- Concurrency bug in `BaseVectorStoreDriver.upsert_text_artifacts`.
+- Schema issues with Tools that use lists.
+- Issue with native Tool calling and streaming with `GooglePromptDriver`.
+- Description not being used properly in `StructureRunTool`.
+
 ## [0.29.2] - 2024-08-16

 ### Fixed
@@ -36,6 +96,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - `@observable` decorator for selecting which functions/methods to provide observability for.
 - `GenericArtifact` for storing any data.
 - `BaseTextArtifact` for text-based Artifacts to subclass.
+- `HuggingFacePipelineImageGenerationDriver` for generating images locally with HuggingFace pipelines.
+- `BaseImageGenerationPipelineDriver` as the base class for drivers interfacing with HuggingFace image generation pipelines.
+- `StableDiffusion3ImageGenerationPipelineDriver` for local text-to-image generation using a Stable Diffusion 3 pipeline.
+- `StableDiffusion3Img2ImgImageGenerationPipelineDriver` for local image-to-image generation using a Stable Diffusion 3 pipeline.
+- `StableDiffusion3ControlNetImageGenerationPipelineDriver` for local ControlNet image generation using a Stable Diffusion 3 pipeline.
+- Optional `params` field to `WebSearch`'s `search` schema that the LLM can be steered into using.

 ### Changed
 - **BREAKING**: `BaseVectorStoreDriver.upsert_text_artifacts` optional arguments are now keyword-only arguments.
@@ -55,6 +121,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - **BREAKING**: Renamed `drivers-vector-postgresql` extra to `drivers-vector-pgvector`.
 - **BREAKING**: Update `marqo` dependency to `^3.7.0`.
 - **BREAKING**: Removed `drivers-sql-postgresql` extra. Use `drivers-sql` extra and install necessary drivers (i.e. `psycopg2`) separately.
+- **BREAKING**: `api_key` and `search_id` are now required fields in `GoogleWebSearchDriver`.
+- **BREAKING**: `web_search_driver` is now a required field in the `WebSearch` Tool.
+- `GoogleWebSearchDriver` and `DuckDuckGoWebSearchDriver` now use `kwargs` passed to the `run` method.
 - Removed unnecessary `sqlalchemy-redshift` dependency in `drivers-sql-amazon-redshift` extra.
 - Removed unnecessary `transformers` dependency in `drivers-prompt-huggingface` extra.
 - Removed unnecessary `huggingface-hub` dependency in `drivers-prompt-huggingface-pipeline` extra.
@@ -378,6 +447,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Default model of `AmazonBedrockStructureConfig` to `anthropic.claude-3-sonnet-20240229-v1:0`.
 - `AnthropicPromptDriver` and `BedrockClaudePromptModelDriver` to use Anthropic's Messages API.
- `OpenAiVisionImageQueryDriver` now has a required field `max_tokens` that defaults to 256 +- `GriptapeCloudStructureRunDriver` now outputs a `BaseArtifact` instead of a `TextArtifact` ## [0.23.2] - 2024-03-15 @@ -407,7 +477,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `JsonExtractionTask` for convenience over using `ExtractionTask` with a `JsonExtractionEngine`. - `CsvExtractionTask` for convenience over using `ExtractionTask` with a `CsvExtractionEngine`. - `OpenAiVisionImageQueryDriver` to support queries on images using OpenAI's Vision model. -- `ImageQueryClient` allowing an Agent to make queries on images on disk or in Task Memory. +- `ImageQueryTool` allowing an Agent to make queries on images on disk or in Task Memory. - `ImageQueryTask` and `ImageQueryEngine`. ### Fixed diff --git a/Makefile b/Makefile index 837a4b155..1db428b2c 100644 --- a/Makefile +++ b/Makefile @@ -23,6 +23,7 @@ install/core: ## Install core dependencies. .PHONY: install/all install/all: ## Install all dependencies. @poetry install --with dev --with test --with docs --all-extras + @poetry run pre-commit install .PHONY: install/dev install/dev: ## Install dev dependencies. @@ -68,7 +69,7 @@ check/lint: .PHONY: check/types check/types: - @poetry run pyright griptape/ + @poetry run pyright griptape $(shell find docs -type f -path "*/src/*") .PHONY: check/spell check/spell: diff --git a/README.md b/README.md index 42df8fbd6..d9f21c2d8 100644 --- a/README.md +++ b/README.md @@ -89,14 +89,14 @@ With Griptape, you can create Structures, such as Agents, Pipelines, and Workflo ```python from griptape.structures import Agent -from griptape.tools import WebScraper, FileManager, TaskMemoryClient +from griptape.tools import WebScraperTool, FileManagerTool, PromptSummaryTool agent = Agent( input="Load {{ args[0] }}, summarize it, and store it in a file called {{ args[1] }}.", tools=[ - WebScraper(off_prompt=True), - TaskMemoryClient(off_prompt=True), - FileManager() + WebScraperTool(off_prompt=True), + PromptSummaryTool(off_prompt=True), + FileManagerTool() ] ) agent.run("https://griptape.ai", "griptape.txt") @@ -104,42 +104,63 @@ agent.run("https://griptape.ai", "griptape.txt") And here is the output: ``` -[04/02/24 13:51:09] INFO ToolkitTask 85700ec1b0594e1a9502c0efe7da6ef4 +[08/12/24 14:48:15] INFO ToolkitTask c90d263ec69046e8b30323c131ae4ba0 Input: Load https://griptape.ai, summarize it, and store it in a file called griptape.txt. -[04/02/24 13:51:15] INFO Subtask db6a3e7cb2f549128c358149d340f91c - Thought: First, I need to load the content of the website using the WebScraper action. Then, I will use the TaskMemoryClient action to - summarize the content. Finally, I will save the summarized content to a file using the FileManager action. +[08/12/24 14:48:16] INFO Subtask ebe23832cbe2464fb9ecde9fcee7c30f Actions: [ { - "name": "WebScraper", + "tag": "call_62kBnkswnk9Y6GH6kn1GIKk6", + "name": "WebScraperTool", "path": "get_content", "input": { "values": { "url": "https://griptape.ai" } - }, - "tag": "load_website_content" + } } ] -[04/02/24 13:51:16] INFO Subtask db6a3e7cb2f549128c358149d340f91c - Response: Output of "WebScraper.get_content" was stored in memory with memory_name "TaskMemory" and artifact_namespace - "752b38bb86da4baabdbd9f444eb4a0d1" -[04/02/24 13:51:19] INFO Subtask c3edba87ebf845d4b85e3a791f8fde8d - Thought: Now that the website content is loaded into memory, I need to summarize it using the TaskMemoryClient action. 
- Actions: [{"tag": "summarize_content", "name": "TaskMemoryClient", "path": "summarize", "input": {"values": {"memory_name": "TaskMemory", - "artifact_namespace": "752b38bb86da4baabdbd9f444eb4a0d1"}}}] -[04/02/24 13:51:25] INFO Subtask c3edba87ebf845d4b85e3a791f8fde8d - Response: Output of "TaskMemoryClient.summarize" was stored in memory with memory_name "TaskMemory" and artifact_namespace - "c4f131c201f147dcab07be3925b46294" -[04/02/24 13:51:33] INFO Subtask 06fe01ca64a744b38a8c08eb152aaacb - Thought: Now that the content has been summarized and stored in memory, I need to save this summarized content to a file named 'griptape.txt' - using the FileManager action. - Actions: [{"tag": "save_summarized_content", "name": "FileManager", "path": "save_memory_artifacts_to_disk", "input": {"values": {"dir_name": - ".", "file_name": "griptape.txt", "memory_name": "TaskMemory", "artifact_namespace": "c4f131c201f147dcab07be3925b46294"}}}] - INFO Subtask 06fe01ca64a744b38a8c08eb152aaacb - Response: saved successfully -[04/02/24 13:51:35] INFO ToolkitTask 85700ec1b0594e1a9502c0efe7da6ef4 - Output: The summarized content of the website https://griptape.ai has been successfully saved to a file named 'griptape.txt'. +[08/12/24 14:48:17] INFO Subtask ebe23832cbe2464fb9ecde9fcee7c30f + Response: Output of "WebScraperTool.get_content" was stored in memory with memory_name "TaskMemory" and artifact_namespace + "cecca28eb0c74bcd8c7119ed7f790c95" +[08/12/24 14:48:18] INFO Subtask dca04901436d49d2ade86cd6b4e1038a + Actions: [ + { + "tag": "call_o9F1taIxHty0mDlWLcAjTAAu", + "name": "PromptSummaryTool", + "path": "summarize", + "input": { + "values": { + "summary": { + "memory_name": "TaskMemory", + "artifact_namespace": "cecca28eb0c74bcd8c7119ed7f790c95" + } + } + } + } + ] +[08/12/24 14:48:21] INFO Subtask dca04901436d49d2ade86cd6b4e1038a + Response: Output of "PromptSummaryTool.summarize" was stored in memory with memory_name "TaskMemory" and artifact_namespace + "73765e32b8404e32927822250dc2ae8b" +[08/12/24 14:48:22] INFO Subtask c233853450fb4fd6a3e9c04c52b33bf6 + Actions: [ + { + "tag": "call_eKvIUIw45aRYKDBpT1gGKc9b", + "name": "FileManagerTool", + "path": "save_memory_artifacts_to_disk", + "input": { + "values": { + "dir_name": ".", + "file_name": "griptape.txt", + "memory_name": "TaskMemory", + "artifact_namespace": "73765e32b8404e32927822250dc2ae8b" + } + } + } + ] + INFO Subtask c233853450fb4fd6a3e9c04c52b33bf6 + Response: Successfully saved memory artifacts to disk +[08/12/24 14:48:23] INFO ToolkitTask c90d263ec69046e8b30323c131ae4ba0 + Output: The content from https://griptape.ai has been summarized and stored in a file called `griptape.txt`. ``` During the run, the Griptape Agent loaded a webpage with a [Tool](https://docs.griptape.ai/stable/griptape-tools/), stored its full content in [Task Memory](https://docs.griptape.ai/stable/griptape-framework/structures/task-memory.md), queried it to answer the original question, and finally saved the answer to a file. 
diff --git a/_typos.toml b/_typos.toml index 1819b51ef..659cc6823 100644 --- a/_typos.toml +++ b/_typos.toml @@ -1,3 +1,6 @@ +[default] +extend-ignore-re = ["call_[[:alnum:]]+"] + [default.extend-words] # Don't correct the state ND ND = "ND" diff --git a/docs/assets/css/extra.css b/docs/assets/css/extra.css index 17c7b4335..cbc902c49 100644 --- a/docs/assets/css/extra.css +++ b/docs/assets/css/extra.css @@ -5,8 +5,3 @@ .md-typeset table:not([class]) { display: table; } - -/* Hide the code block title since we're using it for other purposes.*/ -.filename { - display: none !important -} diff --git a/docs/examples/amazon-dynamodb-sessions.md b/docs/examples/amazon-dynamodb-sessions.md index aa4050ab9..d9a6e4bdd 100644 --- a/docs/examples/amazon-dynamodb-sessions.md +++ b/docs/examples/amazon-dynamodb-sessions.md @@ -7,40 +7,7 @@ In this example, we will show you how to use the [AmazonDynamoDbConversationMemo This code implements the idea of a generic "Session" that represents a Conversation Memory entry. For example, a "Session" could be used to represent an individual user's conversation, or a group conversation thread. ```python -import sys -import os -import argparse - -import boto3 -from griptape.drivers import ( - AmazonDynamoDbConversationMemoryDriver, -) -from griptape.structures import Agent -from griptape.memory.structure import ConversationMemory - -if len(sys.argv) > 2: - input = sys.argv[1] - session_id = sys.argv[2] -else: - input = "Hello!" # Default input - session_id = "session-id-123" # Default session ID - -structure = Agent( - conversation_memory=ConversationMemory( - driver=AmazonDynamoDbConversationMemoryDriver( - session=boto3.Session( - aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"], - aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"], - ), - table_name=os.environ["DYNAMODB_TABLE_NAME"], # The name of the DynamoDB table - partition_key="id", # The name of the partition key - partition_key_value=session_id, # The value of the partition key - value_attribute_key="value", # The key in the DynamoDB item that stores the memory value - ) - ) -) - -print(structure.run(input).output_task.output.value) +--8<-- "docs/examples/src/amazon_dynamodb_sessions_1.py" ``` Conversation Memory for an individual user: diff --git a/docs/examples/load-and-query-pinecone.md b/docs/examples/load-and-query-pinecone.md index dbe163f8e..549268ea0 100644 --- a/docs/examples/load-and-query-pinecone.md +++ b/docs/examples/load-and-query-pinecone.md @@ -1,44 +1,3 @@ ```python -import hashlib -import os -import json -from urllib.request import urlopen -from griptape.drivers import PineconeVectorStoreDriver, OpenAiEmbeddingDriver - -def load_data(driver: PineconeVectorStoreDriver) -> None: - response = urlopen( - "https://raw.githubusercontent.com/wedeploy-examples/" - "supermarket-web-example/master/products.json" - ) - - for product in json.loads(response.read()): - driver.upsert_text( - product["description"], - vector_id=hashlib.md5(product["title"].encode()).hexdigest(), - meta={ - "title": product["title"], - "description": product["description"], - "type": product["type"], - "price": product["price"], - "rating": product["rating"], - }, - namespace="supermarket-products", - ) - -vector_driver = PineconeVectorStoreDriver( - api_key=os.environ["PINECONE_API_KEY"], - environment=os.environ["PINECONE_ENVIRONMENT"], - index_name=os.environ["PINECONE_INDEX_NAME"], - embedding_driver=OpenAiEmbeddingDriver() -) - -load_data(vector_driver) - -result = vector_driver.query( - "fruit", - count=3, 
- filter={"price": {"$lte": 15}, "rating": {"$gte": 4}}, - namespace="supermarket-products", -) -print(result) +--8<-- "docs/examples/src/load_and_query_pinecone_1.py" ``` diff --git a/docs/examples/load-query-and-chat-marqo.md b/docs/examples/load-query-and-chat-marqo.md index fb5906e9e..882bc6228 100644 --- a/docs/examples/load-query-and-chat-marqo.md +++ b/docs/examples/load-query-and-chat-marqo.md @@ -1,43 +1,3 @@ -```python title="PYTEST_IGNORE" -import os -from griptape import utils -from griptape.drivers import MarqoVectorStoreDriver -from griptape.loaders import WebLoader -from griptape.structures import Agent -from griptape.tools import VectorStoreClient -from griptape.drivers import OpenAiEmbeddingDriver - -# Define the namespace -namespace = "griptape-ai" - -# # Initialize the vector store driver -vector_store = MarqoVectorStoreDriver( - api_key=os.environ["MARQO_API_KEY"], - url=os.environ["MARQO_URL"], - index=os.environ["MARQO_INDEX_NAME"], - embedding_driver=OpenAiEmbeddingDriver() -) - -# Initialize the knowledge base tool -vector_store_tool = VectorStoreClient( - description="Contains information about the Griptape Framework from www.griptape.ai", - namespace=namespace, - vector_store_driver=vector_store -) - -# Load artifacts from the web -artifacts = WebLoader().load("https://www.griptape.ai") - -# Upsert the artifacts into the vector store -vector_store.upsert_text_artifacts( - { - namespace: artifacts, - } -) - -# Initialize the agent -agent = Agent(tools=[vector_store_tool]) - -# Start the chat -utils.Chat(agent).start() +```python +--8<-- "docs/examples/src/load_query_and_chat_marqo_1.py" ``` diff --git a/docs/examples/multi-agent-workflow.md b/docs/examples/multi-agent-workflow.md index 4a26625b3..8763a0a3a 100644 --- a/docs/examples/multi-agent-workflow.md +++ b/docs/examples/multi-agent-workflow.md @@ -5,186 +5,5 @@ Additionally, this architecture opens us up to using services such as [Griptape ```python -import os - -from griptape.drivers import WebhookEventListenerDriver, LocalStructureRunDriver, GoogleWebSearchDriver -from griptape.events import EventListener, FinishStructureRunEvent -from griptape.rules import Rule, Ruleset -from griptape.structures import Agent, Workflow -from griptape.tasks import PromptTask, StructureRunTask -from griptape.tools import ( - TaskMemoryClient, - WebScraper, - WebSearch, -) - -WRITERS = [ - { - "role": "Travel Adventure Blogger", - "goal": "Inspire wanderlust with stories of hidden gems and exotic locales", - "backstory": "With a passport full of stamps, you bring distant cultures and breathtaking scenes to life through vivid storytelling and personal anecdotes.", - }, - { - "role": "Lifestyle Freelance Writer", - "goal": "Share practical advice on living a balanced and stylish life", - "backstory": "From the latest trends in home decor to tips for wellness, your articles help readers create a life that feels both aspirational and attainable.", - }, -] - - -def build_researcher(): - """Builds a Researcher Structure.""" - researcher = Agent( - id="researcher", - tools=[ - WebSearch( - web_search_driver=GoogleWebSearchDriver( - api_key=os.environ["GOOGLE_API_KEY"], - search_id=os.environ["GOOGLE_API_SEARCH_ID"], - ), - ), - WebScraper( - off_prompt=True, - ), - TaskMemoryClient(off_prompt=False), - ], - rulesets=[ - Ruleset( - name="Position", - rules=[ - Rule( - value="Lead Research Analyst", - ) - ], - ), - Ruleset( - name="Objective", - rules=[ - Rule( - value="Discover innovative advancements in artificial intelligence and 
data analytics", - ) - ], - ), - Ruleset( - name="Background", - rules=[ - Rule( - value="""You are part of a prominent technology research institute. - Your speciality is spotting new trends. - You excel at analyzing intricate data and delivering practical insights.""" - ) - ], - ), - Ruleset( - name="Desired Outcome", - rules=[ - Rule( - value="Comprehensive analysis report in list format", - ) - ], - ), - ], - ) - - return researcher - - -def build_writer(role: str, goal: str, backstory: str): - """Builds a Writer Structure. - - Args: - role: The role of the writer. - goal: The goal of the writer. - backstory: The backstory of the writer. - """ - writer = Agent( - id=role.lower().replace(" ", "_"), - event_listeners=[ - EventListener( - event_types=[FinishStructureRunEvent], - driver=WebhookEventListenerDriver( - webhook_url=os.environ["WEBHOOK_URL"], - ), - ) - ], - rulesets=[ - Ruleset( - name="Position", - rules=[ - Rule( - value=role, - ) - ], - ), - Ruleset( - name="Objective", - rules=[ - Rule( - value=goal, - ) - ], - ), - Ruleset( - name="Backstory", - rules=[Rule(value=backstory)], - ), - Ruleset( - name="Desired Outcome", - rules=[ - Rule( - value="Full blog post of at least 4 paragraphs", - ) - ], - ), - ], - ) - - return writer - - -if __name__ == "__main__": - # Build the team - team = Workflow() - research_task = team.add_task( - StructureRunTask( - ( - """Perform a detailed examination of the newest developments in AI as of 2024. - Pinpoint major trends, breakthroughs, and their implications for various industries.""", - ), - id="research", - driver=LocalStructureRunDriver( - structure_factory_fn=build_researcher, - ), - ), - ) - writer_tasks = team.add_tasks(*[ - StructureRunTask( - ( - """Using insights provided, develop an engaging blog - post that highlights the most significant AI advancements. - Your post should be informative yet accessible, catering to a tech-savvy audience. - Make it sound cool, avoid complex words so it doesn't sound like AI. - - Insights: - {{ parent_outputs["research"] }}""", - ), - driver=LocalStructureRunDriver( - structure_factory_fn=lambda: build_writer( - role=writer["role"], - goal=writer["goal"], - backstory=writer["backstory"], - ) - ), - parent_ids=[research_task.id], - ) - for writer in WRITERS - ]) - end_task = team.add_task( - PromptTask( - 'State "All Done!"', - parent_ids=[writer_task.id for writer_task in writer_tasks], - ) - ) - - team.run() +--8<-- "docs/examples/src/multi_agent_workflow_1.py" ``` diff --git a/docs/examples/multiple-agent-shared-memory.md b/docs/examples/multiple-agent-shared-memory.md index 109394d49..bd91f977d 100644 --- a/docs/examples/multiple-agent-shared-memory.md +++ b/docs/examples/multiple-agent-shared-memory.md @@ -1,69 +1,11 @@ -This example shows how to use one `Agent` to load content into `TaskMemory` and get that content from another `Agent` using `TaskMemoryClient`. +This example shows how to use one `Agent` to load content into `TaskMemory` and get that content from another `Agent` using `QueryTool`. -The first `Agent` uses a remote vector store (`MongoDbAtlasVectorStoreDriver` in this example) to handle memory operations. The second `Agent` uses the same instance of `TaskMemory` and the `TaskMemoryClient` with the same `MongoDbAtlasVectorStoreDriver` to get the data. +The first `Agent` uses a remote vector store (`MongoDbAtlasVectorStoreDriver` in this example) to handle memory operations. 
The second `Agent` uses the same instance of `TaskMemory` and the `QueryTool` with the same `MongoDbAtlasVectorStoreDriver` to get the data. The `MongoDbAtlasVectorStoreDriver` assumes that you have a vector index configured where the path to the content is called `vector`, and the number of dimensions set on the index is `1536` (this is a commonly used number of dimensions for embedding models). `asker` uses the same instance of `TaskMemory` as `loader` so that `asker` has access to the `namespace_storages` that `loader` has set. ```python -import os -from griptape.tools import WebScraper, TaskMemoryClient -from griptape.structures import Agent -from griptape.drivers import AzureOpenAiEmbeddingDriver, AzureMongoDbVectorStoreDriver -from griptape.config import AzureOpenAiStructureConfig - - -AZURE_OPENAI_ENDPOINT_1 = os.environ["AZURE_OPENAI_ENDPOINT_1"] -AZURE_OPENAI_API_KEY_1 = os.environ["AZURE_OPENAI_API_KEY_1"] - -MONGODB_HOST = os.environ["MONGODB_HOST"] -MONGODB_USERNAME = os.environ["MONGODB_USERNAME"] -MONGODB_PASSWORD = os.environ["MONGODB_PASSWORD"] -MONGODB_DATABASE_NAME = os.environ["MONGODB_DATABASE_NAME"] -MONGODB_COLLECTION_NAME = os.environ["MONGODB_COLLECTION_NAME"] -MONGODB_INDEX_NAME = os.environ["MONGODB_INDEX_NAME"] -MONGODB_VECTOR_PATH = os.environ["MONGODB_VECTOR_PATH"] -MONGODB_CONNECTION_STRING = f"mongodb+srv://{MONGODB_USERNAME}:{MONGODB_PASSWORD}@{MONGODB_HOST}/{MONGODB_DATABASE_NAME}?tls=true&authMechanism=SCRAM-SHA-256&retrywrites=false&maxIdleTimeMS=120000" - - -embedding_driver = AzureOpenAiEmbeddingDriver( - model='text-embedding-ada-002', - azure_endpoint=AZURE_OPENAI_ENDPOINT_1, - api_key=AZURE_OPENAI_API_KEY_1, -) - -mongo_driver = AzureMongoDbVectorStoreDriver( - connection_string=MONGODB_CONNECTION_STRING, - database_name=MONGODB_DATABASE_NAME, - collection_name=MONGODB_COLLECTION_NAME, - embedding_driver=embedding_driver, - index_name=MONGODB_INDEX_NAME, - vector_path=MONGODB_VECTOR_PATH, -) - -config = AzureOpenAiStructureConfig( - azure_endpoint=AZURE_OPENAI_ENDPOINT_1, - vector_store_driver=mongo_driver, - embedding_driver=embedding_driver, -) - -loader = Agent( - tools=[ - WebScraper(off_prompt=True), - ], - config=config, -) -asker = Agent( - tools=[ - TaskMemoryClient(off_prompt=False), - ], - meta_memory=loader.meta_memory, - task_memory=loader.task_memory, - config=config, -) - -if __name__ == "__main__": - loader.run("Load https://medium.com/enterprise-rag/a-first-intro-to-complex-rag-retrieval-augmented-generation-a8624d70090f") - asker.run("why is retrieval augmented generation useful?") +--8<-- "docs/examples/src/multiple_agent_shared_memory_1.py" ``` diff --git a/docs/examples/query-webpage-astra-db.md b/docs/examples/query-webpage-astra-db.md new file mode 100644 index 000000000..7e98b63ac --- /dev/null +++ b/docs/examples/query-webpage-astra-db.md @@ -0,0 +1,16 @@ +The following example script ingests a Web page (a blog post), +stores its chunked contents on Astra DB through the Astra DB vector store driver, +and finally runs a RAG process to answer a question specific to the topic of the +Web page. + +This script requires that a vector collection has been created in the Astra database +(with name `"griptape_test_collection"` and vector dimension matching the embedding being used, i.e. 1536 in this case). + +_Note:_ Besides the [Astra DB](../griptape-framework/drivers/vector-store-drivers.md#astra-db) extra, +this example requires the `drivers-web-scraper-trafilatura` +Griptape extra to be installed as well. 
+ + +```python +--8<-- "docs/examples/src/query_webpage_astra_db_1.py" +``` diff --git a/docs/examples/query-webpage.md b/docs/examples/query-webpage.md index 07801b32c..b8cc23e5f 100644 --- a/docs/examples/query-webpage.md +++ b/docs/examples/query-webpage.md @@ -1,28 +1,3 @@ ```python -import os -from griptape.artifacts import BaseArtifact -from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver -from griptape.loaders import WebLoader - - -vector_store = LocalVectorStoreDriver( - embedding_driver=OpenAiEmbeddingDriver( - api_key=os.environ["OPENAI_API_KEY"] - ) -) - -[ - vector_store.upsert_text_artifact(a, namespace="griptape") - for a in WebLoader(max_tokens=100).load("https://www.griptape.ai") -] - -results = vector_store.query( - "creativity", - count=3, - namespace="griptape" -) - -values = [r.to_artifact().value for r in results] - -print("\n\n".join(values)) -``` \ No newline at end of file +--8<-- "docs/examples/src/query_webpage_1.py" +``` diff --git a/docs/examples/src/amazon_dynamodb_sessions_1.py b/docs/examples/src/amazon_dynamodb_sessions_1.py new file mode 100644 index 000000000..f7a6d0cd6 --- /dev/null +++ b/docs/examples/src/amazon_dynamodb_sessions_1.py @@ -0,0 +1,34 @@ +import os +import sys + +import boto3 + +from griptape.drivers import ( + AmazonDynamoDbConversationMemoryDriver, +) +from griptape.memory.structure import ConversationMemory +from griptape.structures import Agent + +if len(sys.argv) > 2: + user_input = sys.argv[1] + session_id = sys.argv[2] +else: + user_input = "Hello!" # Default input + session_id = "session-id-123" # Default session ID + +structure = Agent( + conversation_memory=ConversationMemory( + driver=AmazonDynamoDbConversationMemoryDriver( + session=boto3.Session( + aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"], + aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"], + ), + table_name=os.environ["DYNAMODB_TABLE_NAME"], # The name of the DynamoDB table + partition_key="id", # The name of the partition key + partition_key_value=session_id, # The value of the partition key + value_attribute_key="value", # The key in the DynamoDB item that stores the memory value + ) + ) +) + +print(structure.run(user_input).output_task.output.value) diff --git a/docs/examples/src/load_and_query_pinecone_1.py b/docs/examples/src/load_and_query_pinecone_1.py new file mode 100644 index 000000000..6aad6655e --- /dev/null +++ b/docs/examples/src/load_and_query_pinecone_1.py @@ -0,0 +1,44 @@ +import hashlib +import json +import os +from urllib.request import urlopen + +from griptape.drivers import OpenAiEmbeddingDriver, PineconeVectorStoreDriver + + +def load_data(driver: PineconeVectorStoreDriver) -> None: + response = urlopen( + "https://raw.githubusercontent.com/wedeploy-examples/" "supermarket-web-example/master/products.json" + ) + + for product in json.loads(response.read()): + driver.upsert_text( + product["description"], + vector_id=hashlib.md5(product["title"].encode()).hexdigest(), + meta={ + "title": product["title"], + "description": product["description"], + "type": product["type"], + "price": product["price"], + "rating": product["rating"], + }, + namespace="supermarket-products", + ) + + +vector_driver = PineconeVectorStoreDriver( + api_key=os.environ["PINECONE_API_KEY"], + environment=os.environ["PINECONE_ENVIRONMENT"], + index_name=os.environ["PINECONE_INDEX_NAME"], + embedding_driver=OpenAiEmbeddingDriver(), +) + +load_data(vector_driver) + +result = vector_driver.query( + "fruit", + count=3, + filter={"price": {"$lte": 
15}, "rating": {"$gte": 4}}, + namespace="supermarket-products", +) +print(result) diff --git a/docs/examples/src/load_query_and_chat_marqo_1.py b/docs/examples/src/load_query_and_chat_marqo_1.py new file mode 100644 index 000000000..013a0264f --- /dev/null +++ b/docs/examples/src/load_query_and_chat_marqo_1.py @@ -0,0 +1,44 @@ +import os + +from griptape import utils +from griptape.artifacts.error_artifact import ErrorArtifact +from griptape.drivers import MarqoVectorStoreDriver, OpenAiEmbeddingDriver +from griptape.loaders import WebLoader +from griptape.structures import Agent +from griptape.tools import VectorStoreTool + +# Define the namespace +namespace = "griptape-ai" + +# # Initialize the vector store driver +vector_store = MarqoVectorStoreDriver( + api_key=os.environ["MARQO_API_KEY"], + url=os.environ["MARQO_URL"], + index=os.environ["MARQO_INDEX_NAME"], + embedding_driver=OpenAiEmbeddingDriver(), +) + +# Initialize the knowledge base tool +vector_store_tool = VectorStoreTool( + description="Contains information about the Griptape Framework from www.griptape.ai", + vector_store_driver=vector_store, +) + +# Load artifacts from the web +artifacts = WebLoader().load("https://www.griptape.ai") + +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) + +# Upsert the artifacts into the vector store +vector_store.upsert_text_artifacts( + { + namespace: artifacts, + } +) + +# Initialize the agent +agent = Agent(tools=[vector_store_tool]) + +# Start the chat +utils.Chat(agent).start() diff --git a/docs/examples/src/multi_agent_workflow_1.py b/docs/examples/src/multi_agent_workflow_1.py new file mode 100644 index 000000000..ad9436a55 --- /dev/null +++ b/docs/examples/src/multi_agent_workflow_1.py @@ -0,0 +1,171 @@ +import os + +from griptape.drivers import GoogleWebSearchDriver, LocalStructureRunDriver +from griptape.rules import Rule, Ruleset +from griptape.structures import Agent, Workflow +from griptape.tasks import PromptTask, StructureRunTask +from griptape.tools import ( + PromptSummaryTool, + WebScraperTool, + WebSearchTool, +) + +WRITERS = [ + { + "role": "Travel Adventure Blogger", + "goal": "Inspire wanderlust with stories of hidden gems and exotic locales", + "backstory": "With a passport full of stamps, you bring distant cultures and breathtaking scenes to life through vivid storytelling and personal anecdotes.", + }, + { + "role": "Lifestyle Freelance Writer", + "goal": "Share practical advice on living a balanced and stylish life", + "backstory": "From the latest trends in home decor to tips for wellness, your articles help readers create a life that feels both aspirational and attainable.", + }, +] + + +def build_researcher() -> Agent: + """Builds a Researcher Structure.""" + return Agent( + id="researcher", + tools=[ + WebSearchTool( + web_search_driver=GoogleWebSearchDriver( + api_key=os.environ["GOOGLE_API_KEY"], + search_id=os.environ["GOOGLE_API_SEARCH_ID"], + ), + ), + WebScraperTool( + off_prompt=True, + ), + PromptSummaryTool(off_prompt=False), + ], + rulesets=[ + Ruleset( + name="Position", + rules=[ + Rule( + value="Lead Research Analyst", + ) + ], + ), + Ruleset( + name="Objective", + rules=[ + Rule( + value="Discover innovative advancements in artificial intelligence and data analytics", + ) + ], + ), + Ruleset( + name="Background", + rules=[ + Rule( + value="""You are part of a prominent technology research institute. + Your speciality is spotting new trends. 
+ You excel at analyzing intricate data and delivering practical insights.""" + ) + ], + ), + Ruleset( + name="Desired Outcome", + rules=[ + Rule( + value="Comprehensive analysis report in list format", + ) + ], + ), + ], + ) + + +def build_writer(role: str, goal: str, backstory: str) -> Agent: + """Builds a Writer Structure. + + Args: + role: The role of the writer. + goal: The goal of the writer. + backstory: The backstory of the writer. + """ + return Agent( + id=role.lower().replace(" ", "_"), + rulesets=[ + Ruleset( + name="Position", + rules=[ + Rule( + value=role, + ) + ], + ), + Ruleset( + name="Objective", + rules=[ + Rule( + value=goal, + ) + ], + ), + Ruleset( + name="Backstory", + rules=[Rule(value=backstory)], + ), + Ruleset( + name="Desired Outcome", + rules=[ + Rule( + value="Full blog post of at least 4 paragraphs", + ) + ], + ), + ], + ) + + +if __name__ == "__main__": + # Build the team + team = Workflow() + research_task = team.add_task( + StructureRunTask( + ( + """Perform a detailed examination of the newest developments in AI as of 2024. + Pinpoint major trends, breakthroughs, and their implications for various industries.""", + ), + id="research", + driver=LocalStructureRunDriver( + structure_factory_fn=build_researcher, + ), + ), + ) + writer_tasks = team.add_tasks( + *[ + StructureRunTask( + ( + """Using insights provided, develop an engaging blog + post that highlights the most significant AI advancements. + Your post should be informative yet accessible, catering to a tech-savvy audience. + Make it sound cool, avoid complex words so it doesn't sound like AI. + + Insights: + {{ parent_outputs["research"] }}""", + ), + driver=LocalStructureRunDriver( + structure_factory_fn=lambda writer=writer: build_writer( + role=writer["role"], + goal=writer["goal"], + backstory=writer["backstory"], + ) + ), + parent_ids=[research_task.id], + ) + for writer in WRITERS + ] + ) + end_task = team.add_task( + PromptTask( + 'State "All Done!"', + parent_ids=[writer_task.id for writer_task in writer_tasks], + ) + ) + + team.run() diff --git a/docs/examples/src/multiple_agent_shared_memory_1.py b/docs/examples/src/multiple_agent_shared_memory_1.py new file mode 100644 index 000000000..e09f29ab7 --- /dev/null +++ b/docs/examples/src/multiple_agent_shared_memory_1.py @@ -0,0 +1,60 @@ +import os + +from griptape.configs import Defaults +from griptape.configs.drivers import AzureOpenAiDriversConfig +from griptape.drivers import AzureMongoDbVectorStoreDriver, AzureOpenAiEmbeddingDriver +from griptape.structures import Agent +from griptape.tools import QueryTool, WebScraperTool + +AZURE_OPENAI_ENDPOINT_1 = os.environ["AZURE_OPENAI_ENDPOINT_1"] +AZURE_OPENAI_API_KEY_1 = os.environ["AZURE_OPENAI_API_KEY_1"] + +MONGODB_HOST = os.environ["MONGODB_HOST"] +MONGODB_USERNAME = os.environ["MONGODB_USERNAME"] +MONGODB_PASSWORD = os.environ["MONGODB_PASSWORD"] +MONGODB_DATABASE_NAME = os.environ["MONGODB_DATABASE_NAME"] +MONGODB_COLLECTION_NAME = os.environ["MONGODB_COLLECTION_NAME"] +MONGODB_INDEX_NAME = os.environ["MONGODB_INDEX_NAME"] +MONGODB_VECTOR_PATH = os.environ["MONGODB_VECTOR_PATH"] +MONGODB_CONNECTION_STRING = f"mongodb+srv://{MONGODB_USERNAME}:{MONGODB_PASSWORD}@{MONGODB_HOST}/{MONGODB_DATABASE_NAME}?tls=true&authMechanism=SCRAM-SHA-256&retrywrites=false&maxIdleTimeMS=120000" + + +embedding_driver = AzureOpenAiEmbeddingDriver( + model="text-embedding-ada-002", + azure_endpoint=AZURE_OPENAI_ENDPOINT_1, + api_key=AZURE_OPENAI_API_KEY_1, +) + +mongo_driver = AzureMongoDbVectorStoreDriver( + 
connection_string=MONGODB_CONNECTION_STRING, + database_name=MONGODB_DATABASE_NAME, + collection_name=MONGODB_COLLECTION_NAME, + embedding_driver=embedding_driver, + index_name=MONGODB_INDEX_NAME, + vector_path=MONGODB_VECTOR_PATH, +) + +Defaults.drivers_config = AzureOpenAiDriversConfig( + azure_endpoint=AZURE_OPENAI_ENDPOINT_1, + vector_store_driver=mongo_driver, + embedding_driver=embedding_driver, +) + +loader = Agent( + tools=[ + WebScraperTool(off_prompt=True), + ], +) +asker = Agent( + tools=[ + QueryTool(off_prompt=False), + ], + meta_memory=loader.meta_memory, + task_memory=loader.task_memory, +) + +if __name__ == "__main__": + loader.run( + "Load https://medium.com/enterprise-rag/a-first-intro-to-complex-rag-retrieval-augmented-generation-a8624d70090f" + ) + asker.run("why is retrieval augmented generation useful?") diff --git a/docs/examples/src/query_webpage_1.py b/docs/examples/src/query_webpage_1.py new file mode 100644 index 000000000..2ea32b718 --- /dev/null +++ b/docs/examples/src/query_webpage_1.py @@ -0,0 +1,20 @@ +import os + +from griptape.artifacts.error_artifact import ErrorArtifact +from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver +from griptape.loaders import WebLoader + +vector_store = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"])) + +artifacts = WebLoader(max_tokens=100).load("https://www.griptape.ai") +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) + +for a in artifacts: + vector_store.upsert_text_artifact(a, namespace="griptape") + +results = vector_store.query("creativity", count=3, namespace="griptape") + +values = [r.to_artifact().value for r in results] + +print("\n\n".join(values)) diff --git a/docs/examples/src/query_webpage_astra_db_1.py b/docs/examples/src/query_webpage_astra_db_1.py new file mode 100644 index 000000000..3309e1dcd --- /dev/null +++ b/docs/examples/src/query_webpage_astra_db_1.py @@ -0,0 +1,57 @@ +import os + +from griptape.artifacts import ErrorArtifact +from griptape.drivers import ( + AstraDbVectorStoreDriver, + OpenAiChatPromptDriver, + OpenAiEmbeddingDriver, +) +from griptape.engines.rag import RagEngine +from griptape.engines.rag.modules import ( + PromptResponseRagModule, + VectorStoreRetrievalRagModule, +) +from griptape.engines.rag.stages import ResponseRagStage, RetrievalRagStage +from griptape.loaders import WebLoader +from griptape.structures import Agent +from griptape.tools import RagTool + +namespace = "datastax_blog" +input_blogpost = "www.datastax.com/blog/indexing-all-of-wikipedia-on-a-laptop" + +vector_store_driver = AstraDbVectorStoreDriver( + embedding_driver=OpenAiEmbeddingDriver(), + api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"], + token=os.environ["ASTRA_DB_APPLICATION_TOKEN"], + collection_name="griptape_test_collection", + astra_db_namespace=os.environ.get("ASTRA_DB_KEYSPACE"), +) + +engine = RagEngine( + retrieval_stage=RetrievalRagStage( + retrieval_modules=[ + VectorStoreRetrievalRagModule( + vector_store_driver=vector_store_driver, + query_params={ + "count": 2, + "namespace": namespace, + }, + ) + ] + ), + response_stage=ResponseRagStage( + response_modules=[PromptResponseRagModule(prompt_driver=OpenAiChatPromptDriver(model="gpt-4o"))] + ), +) + +artifacts = WebLoader(max_tokens=256).load(input_blogpost) +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) +vector_store_driver.upsert_text_artifacts({namespace: artifacts}) + +rag_tool = RagTool( + description="A DataStax 
blog post", + rag_engine=engine, +) +agent = Agent(tools=[rag_tool]) +agent.run("What engine made possible to index such an amount of data, " "and what kind of tuning was required?") diff --git a/docs/examples/src/talk_to_a_pdf_1.py b/docs/examples/src/talk_to_a_pdf_1.py new file mode 100644 index 000000000..b4ab72029 --- /dev/null +++ b/docs/examples/src/talk_to_a_pdf_1.py @@ -0,0 +1,42 @@ +import requests + +from griptape.artifacts.error_artifact import ErrorArtifact +from griptape.drivers import LocalVectorStoreDriver, OpenAiChatPromptDriver, OpenAiEmbeddingDriver +from griptape.engines.rag import RagEngine +from griptape.engines.rag.modules import PromptResponseRagModule, VectorStoreRetrievalRagModule +from griptape.engines.rag.stages import ResponseRagStage, RetrievalRagStage +from griptape.loaders import PdfLoader +from griptape.structures import Agent +from griptape.tools import RagTool +from griptape.utils import Chat + +namespace = "attention" +response = requests.get("https://arxiv.org/pdf/1706.03762.pdf") +vector_store = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver()) +engine = RagEngine( + retrieval_stage=RetrievalRagStage( + retrieval_modules=[ + VectorStoreRetrievalRagModule( + vector_store_driver=vector_store, query_params={"namespace": namespace, "top_n": 20} + ) + ] + ), + response_stage=ResponseRagStage( + response_modules=[PromptResponseRagModule(prompt_driver=OpenAiChatPromptDriver(model="gpt-4o"))] + ), +) +rag_tool = RagTool( + description="Contains information about the Attention Is All You Need paper. " + "Use it to answer any related questions.", + rag_engine=engine, +) + +artifacts = PdfLoader().load(response.content) +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) + +vector_store.upsert_text_artifacts({namespace: artifacts}) + +agent = Agent(tools=[rag_tool]) + +Chat(agent).start() diff --git a/docs/examples/src/talk_to_a_video_1.py b/docs/examples/src/talk_to_a_video_1.py new file mode 100644 index 000000000..d23c906b6 --- /dev/null +++ b/docs/examples/src/talk_to_a_video_1.py @@ -0,0 +1,28 @@ +import time + +import google.generativeai as genai + +from griptape.artifacts import GenericArtifact, TextArtifact +from griptape.configs import Defaults +from griptape.configs.drivers import GoogleDriversConfig +from griptape.structures import Agent + +Defaults.drivers_config = GoogleDriversConfig() + +video_file = genai.upload_file(path="tests/resources/griptape-comfyui.mp4") +while video_file.state.name == "PROCESSING": + time.sleep(2) + video_file = genai.get_file(video_file.name) + +if video_file.state.name == "FAILED": + raise ValueError(video_file.state.name) + +agent = Agent( + input=[ + GenericArtifact(video_file), + TextArtifact("Answer this question regarding the video: {{ args[0] }}"), + ] +) + +agent.run("Are there any scenes that show a character with earings?") +agent.run("What happens in the scene starting at 19 seconds?") diff --git a/docs/examples/src/talk_to_a_webpage_1.py b/docs/examples/src/talk_to_a_webpage_1.py new file mode 100644 index 000000000..0412ed977 --- /dev/null +++ b/docs/examples/src/talk_to_a_webpage_1.py @@ -0,0 +1,51 @@ +from griptape.artifacts.error_artifact import ErrorArtifact +from griptape.drivers import LocalVectorStoreDriver, OpenAiChatPromptDriver, OpenAiEmbeddingDriver +from griptape.engines.rag import RagEngine +from griptape.engines.rag.modules import PromptResponseRagModule, VectorStoreRetrievalRagModule +from griptape.engines.rag.stages import ResponseRagStage, 
RetrievalRagStage +from griptape.loaders import WebLoader +from griptape.rules import Rule, Ruleset +from griptape.structures import Agent +from griptape.tools import RagTool +from griptape.utils import Chat + +namespace = "physics-wiki" + +vector_store_driver = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver()) + +engine = RagEngine( +    retrieval_stage=RetrievalRagStage( +        retrieval_modules=[ +            VectorStoreRetrievalRagModule( +                vector_store_driver=vector_store_driver, query_params={"namespace": namespace, "top_n": 20} +            ) +        ] +    ), +    response_stage=ResponseRagStage( +        response_modules=[PromptResponseRagModule(prompt_driver=OpenAiChatPromptDriver(model="gpt-4o"))] +    ), +) + +artifacts = WebLoader().load("https://en.wikipedia.org/wiki/Physics") + +if isinstance(artifacts, ErrorArtifact): +    raise Exception(artifacts.value) + +vector_store_driver.upsert_text_artifacts({namespace: artifacts}) + +rag_tool = RagTool( +    description="Contains information about physics. " "Use it to answer any physics-related questions.", +    rag_engine=engine, +) + +agent = Agent( +    rulesets=[ +        Ruleset( +            name="Physics Tutor", +            rules=[Rule("Always introduce yourself as a physics tutor"), Rule("Be truthful. Only discuss physics.")], +        ) +    ], +    tools=[rag_tool], +) + +Chat(agent).start() diff --git a/docs/examples/src/talk_to_redshift_1.py b/docs/examples/src/talk_to_redshift_1.py new file mode 100644 index 000000000..bd4b57f4f --- /dev/null +++ b/docs/examples/src/talk_to_redshift_1.py @@ -0,0 +1,43 @@ +import os + +import boto3 + +from griptape.drivers import AmazonRedshiftSqlDriver +from griptape.loaders import SqlLoader +from griptape.rules import Rule, Ruleset +from griptape.structures import Agent +from griptape.tools import FileManagerTool, SqlTool +from griptape.utils import Chat + +session = boto3.Session() + +sql_loader = SqlLoader( +    sql_driver=AmazonRedshiftSqlDriver( +        database=os.environ["REDSHIFT_DATABASE"], +        session=session, +        cluster_identifier=os.environ["REDSHIFT_CLUSTER_IDENTIFIER"], +    ) +) + +sql_tool = SqlTool( +    sql_loader=sql_loader, +    table_name="people", +    table_description="contains information about tech industry professionals", +    engine_name="redshift", +) + +agent = Agent( +    tools=[sql_tool, FileManagerTool()], +    rulesets=[ +        Ruleset( +            name="HumansOrg Agent", +            rules=[ +                Rule("Act and introduce yourself as a HumansOrg, Inc. support agent"), +                Rule("Your main objective is to help with finding information about people"), +                Rule("Only use information about people from the sources available to you"), +            ], +        ) +    ], +) + +Chat(agent).start() diff --git a/docs/examples/talk-to-a-pdf.md b/docs/examples/talk-to-a-pdf.md index a3f47f0c6..0524359d5 100644 --- a/docs/examples/talk-to-a-pdf.md +++ b/docs/examples/talk-to-a-pdf.md @@ -1,56 +1,5 @@ -This example demonstrates how to vectorize a PDF of the [Attention Is All You Need](https://arxiv.org/pdf/1706.03762.pdf) paper and setup a Griptape agent with rules and the [VectorStoreClient](../reference/griptape/tools/vector_store_client/tool.md) tool to use it during conversations. +This example demonstrates how to vectorize a PDF of the [Attention Is All You Need](https://arxiv.org/pdf/1706.03762.pdf) paper and set up a Griptape agent with rules and the [RagTool](../reference/griptape/tools/rag/tool.md) tool to use it during conversations. 
```python -import requests -from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver, OpenAiChatPromptDriver -from griptape.engines.rag import RagEngine -from griptape.engines.rag.modules import VectorStoreRetrievalRagModule, PromptResponseRagModule -from griptape.engines.rag.stages import RetrievalRagStage, ResponseRagStage -from griptape.loaders import PdfLoader -from griptape.structures import Agent -from griptape.tools import RagClient -from griptape.utils import Chat - -namespace = "attention" -response = requests.get("https://arxiv.org/pdf/1706.03762.pdf") -vector_store = LocalVectorStoreDriver( -    embedding_driver=OpenAiEmbeddingDriver() -) -engine = RagEngine( -    retrieval_stage=RetrievalRagStage( -        retrieval_modules=[ -            VectorStoreRetrievalRagModule( -                vector_store_driver=vector_store, -                query_params={ -                    "namespace": namespace, -                    "top_n": 20 - -                } -            ) -        ] -    ), -    response_stage=ResponseRagStage( -        response_module=PromptResponseRagModule( -            prompt_driver=OpenAiChatPromptDriver(model="gpt-4o") -        ) -    ) -) -vector_store_tool = RagClient( -    description="Contains information about the Attention Is All You Need paper. " -                "Use it to answer any related questions.", -    rag_engine=engine -) - -vector_store.upsert_text_artifacts( -    { -        namespace: PdfLoader().load(response.content) -    } -) - -agent = Agent( -    tools=[vector_store_tool] -) - -Chat(agent).start() - +--8<-- "docs/examples/src/talk_to_a_pdf_1.py" ``` diff --git a/docs/examples/talk-to-a-video.md b/docs/examples/talk-to-a-video.md index 9673bd1c3..1f0a243c0 100644 --- a/docs/examples/talk-to-a-video.md +++ b/docs/examples/talk-to-a-video.md @@ -3,31 +3,7 @@ In this example, we upload a video file using Gemini's file API, and then pass t Note that because we are using Gemini-specific features, this will not work with other [Prompt Drivers](../griptape-framework/drivers/prompt-drivers.md). ```python -import time -from griptape.structures import Agent -from griptape.tasks import PromptTask -from griptape.artifacts import GenericArtifact, TextArtifact -from griptape.config import GoogleStructureConfig -import google.generativeai as genai - -video_file = genai.upload_file(path="tests/resources/griptape-comfyui.mp4") -while video_file.state.name == "PROCESSING": -    time.sleep(2) -    video_file = genai.get_file(video_file.name) - -if video_file.state.name == "FAILED": -    raise ValueError(video_file.state.name) - -agent = Agent( -    config=GoogleStructureConfig(), -    input=[ -        GenericArtifact(video_file), -        TextArtifact("Answer this question regarding the video: {{ args[0] }}"), -    ] -) - -agent.run("Are there any scenes that show a character with earings?") -agent.run("What happens in the scene starting at 19 seconds?") +--8<-- "docs/examples/src/talk_to_a_video_1.py" ``` ``` diff --git a/docs/examples/talk-to-a-webpage.md b/docs/examples/talk-to-a-webpage.md index d1d0da5fa..e4632d401 100644 --- a/docs/examples/talk-to-a-webpage.md +++ b/docs/examples/talk-to-a-webpage.md @@ -1,70 +1,5 @@ -This example demonstrates how to vectorize a webpage and setup a Griptape agent with rules and the [RagClient](../reference/griptape/tools/rag_client/tool.md) tool to use it during conversations. +This example demonstrates how to vectorize a webpage and set up a Griptape agent with rules and the [RagTool](../reference/griptape/tools/rag/tool.md) tool to use it during conversations. 
```python -from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver, OpenAiChatPromptDriver -from griptape.engines.rag import RagEngine -from griptape.engines.rag.modules import VectorStoreRetrievalRagModule, PromptResponseRagModule -from griptape.engines.rag.stages import RetrievalRagStage, ResponseRagStage -from griptape.loaders import WebLoader -from griptape.rules import Ruleset, Rule -from griptape.structures import Agent -from griptape.tools import RagClient -from griptape.utils import Chat - -namespace = "physics-wiki" - -vector_store_driver = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver()) - -engine = RagEngine( - retrieval_stage=RetrievalRagStage( - retrieval_modules=[ - VectorStoreRetrievalRagModule( - vector_store_driver=vector_store_driver, - query_params={ - "namespace": namespace, - "top_n": 20 - - } - ) - ] - ), - response_stage=ResponseRagStage( - response_module=PromptResponseRagModule( - prompt_driver=OpenAiChatPromptDriver(model="gpt-4o") - ) - ) -) - -artifacts = WebLoader().load( - "https://en.wikipedia.org/wiki/Physics" -) - -vector_store_driver.upsert_text_artifacts( - {namespace: artifacts} -) - -vector_store_tool = RagClient( - description="Contains information about physics. " - "Use it to answer any physics-related questions.", - rag_engine=engine -) - -agent = Agent( - rulesets=[ - Ruleset( - name="Physics Tutor", - rules=[ - Rule( - "Always introduce yourself as a physics tutor" - ), - Rule( - "Be truthful. Only discuss physics." - ) - ] - ) - ], - tools=[vector_store_tool] -) - -Chat(agent).start() +--8<-- "docs/examples/src/talk_to_a_webpage_1.py" ``` diff --git a/docs/examples/talk-to-redshift.md b/docs/examples/talk-to-redshift.md index efaf45594..8fdd5bc7f 100644 --- a/docs/examples/talk-to-redshift.md +++ b/docs/examples/talk-to-redshift.md @@ -3,45 +3,5 @@ This example demonstrates how to build an agent that can dynamically query Amazo Let's build a support agent that uses GPT-4: ```python -import os -import boto3 -from griptape.drivers import AmazonRedshiftSqlDriver -from griptape.loaders import SqlLoader -from griptape.rules import Ruleset, Rule -from griptape.structures import Agent -from griptape.tools import SqlClient, FileManager -from griptape.utils import Chat - -session = boto3.Session() - -sql_loader = SqlLoader( - sql_driver=AmazonRedshiftSqlDriver( - database=os.environ["REDSHIFT_DATABASE"], - session=session, - cluster_identifier=os.environ['REDSHIFT_CLUSTER_IDENTIFIER'], - ) -) - -sql_tool = SqlClient( - sql_loader=sql_loader, - table_name="people", - table_description="contains information about tech industry professionals", - engine_name="redshift" -) - -agent = Agent( - tools=[sql_tool, FileManager()], - rulesets=[ - Ruleset( - name="HumansOrg Agent", - rules=[ - Rule("Act and introduce yourself as a HumansOrg, Inc. support agent"), - Rule("Your main objective is to help with finding information about people"), - Rule("Only use information about people from the sources available to you") - ] - ) - ] -) - -Chat(agent).start() +--8<-- "docs/examples/src/talk_to_redshift_1.py" ``` diff --git a/docs/griptape-framework/data/artifacts.md b/docs/griptape-framework/data/artifacts.md index c7aa55bd0..8c4da02b3 100644 --- a/docs/griptape-framework/data/artifacts.md +++ b/docs/griptape-framework/data/artifacts.md @@ -8,7 +8,7 @@ search: **[Artifacts](../../reference/griptape/artifacts/base_artifact.md)** are used for passing different types of data between Griptape components. 
All tools return artifacts that are later consumed by tasks and task memory. Artifacts make sure framework components enforce contracts when passing and consuming data. -## TextArtifact +## Text A [TextArtifact](../../reference/griptape/artifacts/text_artifact.md) for passing text data of arbitrary size around the framework. It can be used to count tokens with [token_count()](../../reference/griptape/artifacts/text_artifact.md#griptape.artifacts.text_artifact.TextArtifact.token_count) with a tokenizer. It can also be used to generate a text embedding with [generate_embedding()](../../reference/griptape/artifacts/text_artifact.md#griptape.artifacts.text_artifact.TextArtifact.generate_embedding) @@ -16,20 +16,20 @@ and access it with [embedding](../../reference/griptape/artifacts/text_artifact. [TaskMemory](../../reference/griptape/memory/task/task_memory.md) automatically stores [TextArtifact](../../reference/griptape/artifacts/text_artifact.md)s returned by tool activities and returns artifact IDs back to the LLM. -## CsvRowArtifact +## Csv Row A [CsvRowArtifact](../../reference/griptape/artifacts/csv_row_artifact.md) for passing structured row data around the framework. It inherits from [TextArtifact](../../reference/griptape/artifacts/text_artifact.md) and overrides the [to_text()](../../reference/griptape/artifacts/csv_row_artifact.md#griptape.artifacts.csv_row_artifact.CsvRowArtifact.to_text) method, which always returns a valid CSV row. -## InfoArtifact +## Info An [InfoArtifact](../../reference/griptape/artifacts/info_artifact.md) for passing short notifications back to the LLM without task memory storing them. -## ErrorArtifact +## Error An [ErrorArtifact](../../reference/griptape/artifacts/error_artifact.md) is used for passing errors back to the LLM without task memory storing them. -## BlobArtifact +## Blob A [BlobArtifact](../../reference/griptape/artifacts/blob_artifact.md) for passing binary large objects (blobs) back to the LLM. Treat it as a way to return unstructured data, such as images, videos, audio, and other files back from tools. @@ -38,17 +38,27 @@ Each blob has a [name](../../reference/griptape/artifacts/base_artifact.md#gript [TaskMemory](../../reference/griptape/memory/task/task_memory.md) automatically stores [BlobArtifact](../../reference/griptape/artifacts/blob_artifact.md)s returned by tool activities that can be reused by other tools. -## ImageArtifact +## Image -An [ImageArtifact](../../reference/griptape/artifacts/image_artifact.md) is used for passing images back to the LLM. In addition to binary image data, an Image Artifact includes image metadata like MIME type, dimensions, and prompt and model information for images returned by [image generation Drivers](../drivers/image-generation-drivers.md). It inherits from [BlobArtifact](#blobartifact). +An [ImageArtifact](../../reference/griptape/artifacts/image_artifact.md) is used for passing images back to the LLM. In addition to binary image data, an Image Artifact includes image metadata like MIME type, dimensions, and prompt and model information for images returned by [image generation Drivers](../drivers/image-generation-drivers.md). It inherits from [BlobArtifact](#blob). -## AudioArtifact +## Audio -An [AudioArtifact](../../reference/griptape/artifacts/audio_artifact.md) allows the Framework to interact with audio content. An Audio Artifact includes binary audio content as well as metadata like format, duration, and prompt and model information for audio returned generative models. 
It inherits from [BlobArtifact](#blobartifact). +An [AudioArtifact](../../reference/griptape/artifacts/audio_artifact.md) allows the Framework to interact with audio content. An Audio Artifact includes binary audio content as well as metadata like format, duration, and prompt and model information for audio returned by generative models. It inherits from [BlobArtifact](#blob). -## BooleanArtifact +## Boolean A [BooleanArtifact](../../reference/griptape/artifacts/boolean_artifact.md) is used for passing boolean values around the framework. !!! info Any object passed on init to `BooleanArtifact` will be coerced into a `bool` type. This might lead to unintended behavior: `BooleanArtifact("False").value is True`. Use [BooleanArtifact.parse_bool](../../reference/griptape/artifacts/boolean_artifact.md#griptape.artifacts.boolean_artifact.BooleanArtifact.parse_bool) to convert case-insensitive string literal values `"True"` and `"False"` into a `BooleanArtifact`: `BooleanArtifact.parse_bool("False").value is False`. + +## Generic + +A [GenericArtifact](../../reference/griptape/artifacts/generic_artifact.md) can be used as an escape hatch for passing any type of data around the framework. +It is generally not recommended to use this Artifact type, but it can be used in a handful of situations where no other Artifact type fits the data being passed. +See [talking to a video](../../examples/talk-to-a-video.md) for an example of using a `GenericArtifact` to pass a Gemini-specific video file. + +## Json + +A [JsonArtifact](../../reference/griptape/artifacts/json_artifact.md) is used for passing JSON-serializable data around the framework. Anything passed to `value` will be converted using `json.dumps(json.loads(value))`. diff --git a/docs/griptape-framework/data/chunkers.md b/docs/griptape-framework/data/chunkers.md index ecaee5eb4..507645923 100644 --- a/docs/griptape-framework/data/chunkers.md +++ b/docs/griptape-framework/data/chunkers.md @@ -16,12 +16,5 @@ Different types of chunkers provide lists of separators for specific text shapes Here is how to use a chunker: ```python -from griptape.chunkers import TextChunker -from griptape.tokenizers import OpenAiTokenizer -TextChunker( -    # set an optional custom tokenizer -    tokenizer=OpenAiTokenizer(model="gpt-4o"), -    # optionally modify default number of tokens -    max_tokens=100 -).chunk("long text") +--8<-- "docs/griptape-framework/data/src/chunkers_1.py" ``` diff --git a/docs/griptape-framework/data/loaders.md b/docs/griptape-framework/data/loaders.md index 73116c064..914fdee2a 100644 --- a/docs/griptape-framework/data/loaders.md +++ b/docs/griptape-framework/data/loaders.md @@ -17,25 +17,7 @@ multiple documents with [load_collection()](../../reference/griptape/loaders/bas Inherits from the [TextLoader](../../reference/griptape/loaders/text_loader.md) and can be used to load PDFs from a path or from an IO stream: ```python -from griptape.loaders import PdfLoader -from griptape.utils import load_files, load_file -import urllib.request - -urllib.request.urlretrieve("https://arxiv.org/pdf/1706.03762.pdf", "attention.pdf") - -# Load a single PDF file -with open("attention.pdf", "rb") as f: -    PdfLoader().load(f.read()) -# You can also use the load_file utility function -PdfLoader().load(load_file("attention.pdf")) - -urllib.request.urlretrieve("https://arxiv.org/pdf/1706.03762.pdf", "CoT.pdf") - -# Load multiple PDF files -with open("attention.pdf", "rb") as attention, open("CoT.pdf", "rb") as cot: -    PdfLoader().load_collection([attention.read(), cot.read()]) -# 
You can also use the load_files utility function -PdfLoader().load_collection(list(load_files(["attention.pdf", "CoT.pdf"]).values())) +--8<-- "docs/griptape-framework/data/src/loaders_1.py" ``` ## SQL @@ -43,20 +25,7 @@ PdfLoader().load_collection(list(load_files(["attention.pdf", "CoT.pdf"]).values Can be used to load data from a SQL database into [CsvRowArtifact](../../reference/griptape/artifacts/csv_row_artifact.md)s: ```python -from griptape.loaders import SqlLoader -from griptape.drivers import SqlDriver - -SqlLoader( - sql_driver = SqlDriver( - engine_url="sqlite:///:memory:" - ) -).load("SELECT 'foo', 'bar'") - -SqlLoader( - sql_driver = SqlDriver( - engine_url="sqlite:///:memory:" - ) -).load_collection(["SELECT 'foo', 'bar';", "SELECT 'fizz', 'buzz';"]) +--8<-- "docs/griptape-framework/data/src/loaders_2.py" ``` ## CSV @@ -64,20 +33,7 @@ SqlLoader( Can be used to load CSV files into [CsvRowArtifact](../../reference/griptape/artifacts/csv_row_artifact.md)s: ```python -from griptape.loaders import CsvLoader -from griptape.utils import load_file, load_files - -# Load a single CSV file -with open("tests/resources/cities.csv", "r") as f: - CsvLoader().load(f.read()) -# You can also use the load_file utility function -CsvLoader().load(load_file("tests/resources/cities.csv")) - -# Load multiple CSV files -with open("tests/resources/cities.csv", "r") as cities, open("tests/resources/addresses.csv", "r") as addresses: - CsvLoader().load_collection([cities.read(), addresses.read()]) -# You can also use the load_files utility function -CsvLoader().load_collection(list(load_files(["tests/resources/cities.csv", "tests/resources/addresses.csv"]).values())) +--8<-- "docs/griptape-framework/data/src/loaders_3.py" ``` @@ -89,19 +45,7 @@ CsvLoader().load_collection(list(load_files(["tests/resources/cities.csv", "test Can be used to load [pandas](https://pandas.pydata.org/) [DataFrame](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html)s into [CsvRowArtifact](../../reference/griptape/artifacts/csv_row_artifact.md)s: ```python -import urllib -import pandas as pd -from griptape.loaders import DataFrameLoader - -urllib.request.urlretrieve("https://people.sc.fsu.edu/~jburkardt/data/csv/cities.csv", "cities.csv") - -DataFrameLoader().load(pd.read_csv("cities.csv")) - -urllib.request.urlretrieve("https://people.sc.fsu.edu/~jburkardt/data/csv/addresses.csv", "addresses.csv") - -DataFrameLoader().load_collection( - [pd.read_csv('cities.csv'), pd.read_csv('addresses.csv')] -) +--8<-- "docs/griptape-framework/data/src/loaders_4.py" ``` @@ -110,23 +54,7 @@ DataFrameLoader().load_collection( Used to load arbitrary text and text files: ```python -from pathlib import Path -import urllib -from griptape.loaders import TextLoader - -TextLoader().load( - "my text" -) - -urllib.request.urlretrieve("https://example-files.online-convert.com/document/txt/example.txt", "example.txt") - -with open("example.txt", "r") as f: - TextLoader().load(f.read()) - -with open("example.txt", "r") as f: - TextLoader().load_collection( - ["my text", "my other text", f.read()] - ) +--8<-- "docs/griptape-framework/data/src/loaders_5.py" ``` You can set a custom [tokenizer](../../reference/griptape/loaders/text_loader.md#griptape.loaders.text_loader.TextLoader.tokenizer), [max_tokens](../../reference/griptape/loaders/text_loader.md#griptape.loaders.text_loader.TextLoader.max_tokens) parameter, and [chunker](../../reference/griptape/loaders/text_loader.md#griptape.loaders.text_loader.TextLoader.chunker). 
@@ -139,15 +67,7 @@ You can set a custom [tokenizer](../../reference/griptape/loaders/text_loader.md Inherits from the [TextLoader](../../reference/griptape/loaders/text_loader.md) and can be used to load web pages: ```python -from griptape.loaders import WebLoader - -WebLoader().load( - "https://www.griptape.ai" -) - -WebLoader().load_collection( - ["https://www.griptape.ai", "https://docs.griptape.ai"] -) +--8<-- "docs/griptape-framework/data/src/loaders_6.py" ``` ## Image @@ -155,36 +75,16 @@ WebLoader().load_collection( !!! info This driver requires the `loaders-image` [extra](../index.md#extras). -The Image Loader is used to load an image as an [ImageArtifact](./artifacts.md#imageartifact). The Loader operates on image bytes that can be sourced from files on disk, downloaded images, or images in memory. +The Image Loader is used to load an image as an [ImageArtifact](./artifacts.md#image). The Loader operates on image bytes that can be sourced from files on disk, downloaded images, or images in memory. ```python -from griptape.loaders import ImageLoader -from griptape.utils import load_file - -# Load an image from disk -with open("tests/resources/mountain.png", "rb") as f: - disk_image_artifact = ImageLoader().load(f.read()) -# You can also use the load_file utility function -ImageLoader().load(load_file("tests/resources/mountain.png")) +--8<-- "docs/griptape-framework/data/src/loaders_7.py" ``` By default, the Image Loader will load images in their native format, but not all models work on all formats. To normalize the format of Artifacts returned by the Loader, set the `format` field. ```python -from griptape.loaders import ImageLoader -from griptape.utils import load_files, load_file - -# Load a single image in BMP format -with open("tests/resources/mountain.png", "rb") as f: - image_artifact_jpeg = ImageLoader(format="bmp").load(f.read()) -# You can also use the load_file utility function -ImageLoader(format="bmp").load(load_file("tests/resources/mountain.png")) - -# Load multiple images in BMP format -with open("tests/resources/mountain.png", "rb") as mountain, open("tests/resources/cow.png", "rb") as cow: - ImageLoader().load_collection([mountain.read(), cow.read()]) -# You can also use the load_files utility function -ImageLoader().load_collection(list(load_files(["tests/resources/mountain.png", "tests/resources/cow.png"]).values())) +--8<-- "docs/griptape-framework/data/src/loaders_8.py" ``` @@ -196,13 +96,7 @@ ImageLoader().load_collection(list(load_files(["tests/resources/mountain.png", " Can be used to load email from an imap server: ```python -from griptape.loaders import EmailLoader - -loader = EmailLoader(imap_url="an.email.server.hostname", username="username", password="password") - -loader.load(EmailLoader.EmailQuery(label="INBOX")) - -loader.load_collection([EmailLoader.EmailQuery(label="INBOX"), EmailLoader.EmailQuery(label="SENT")]) +--8<-- "docs/griptape-framework/data/src/loaders_9.py" ``` ## Audio @@ -210,18 +104,10 @@ loader.load_collection([EmailLoader.EmailQuery(label="INBOX"), EmailLoader.Email !!! info This driver requires the `loaders-audio` [extra](../index.md#extras). -The [Audio Loader](../../reference/griptape/loaders/audio_loader.md) is used to load audio content as an [AudioArtifact](./artifacts.md#audioartifact). The Loader operates on audio bytes that can be sourced from files on disk, downloaded audio, or audio in memory. 
+The [Audio Loader](../../reference/griptape/loaders/audio_loader.md) is used to load audio content as an [AudioArtifact](./artifacts.md#audio). The Loader operates on audio bytes that can be sourced from files on disk, downloaded audio, or audio in memory. The Loader will load audio in its native format and populates the resulting Artifact's `format` field by making a best-effort guess of the underlying audio format using the `filetype` package. ```python -from griptape.loaders import AudioLoader -from griptape.utils import load_file - -# Load an image from disk -with open("tests/resources/sentences.wav", "rb") as f: -    audio_artifact = AudioLoader().load(f.read()) - -# You can also use the load_file utility function -AudioLoader().load(load_file("tests/resources/sentences.wav")) +--8<-- "docs/griptape-framework/data/src/loaders_10.py" ``` diff --git a/griptape/tools/audio_transcription_client/__init__.py b/docs/griptape-framework/data/src/__init__.py similarity index 100% rename from griptape/tools/audio_transcription_client/__init__.py rename to docs/griptape-framework/data/src/__init__.py diff --git a/docs/griptape-framework/data/src/chunkers_1.py b/docs/griptape-framework/data/src/chunkers_1.py new file mode 100644 index 000000000..28ca92ceb --- /dev/null +++ b/docs/griptape-framework/data/src/chunkers_1.py @@ -0,0 +1,9 @@ +from griptape.chunkers import TextChunker +from griptape.tokenizers import OpenAiTokenizer + +TextChunker( +    # set an optional custom tokenizer +    tokenizer=OpenAiTokenizer(model="gpt-4o"), +    # optionally modify default number of tokens +    max_tokens=100, +).chunk("long text") diff --git a/docs/griptape-framework/data/src/loaders_1.py b/docs/griptape-framework/data/src/loaders_1.py new file mode 100644 index 000000000..2b7f31613 --- /dev/null +++ b/docs/griptape-framework/data/src/loaders_1.py @@ -0,0 +1,19 @@ +import urllib.request +from pathlib import Path + +from griptape.loaders import PdfLoader +from griptape.utils import load_file, load_files + +urllib.request.urlretrieve("https://arxiv.org/pdf/1706.03762.pdf", "attention.pdf") + +# Load a single PDF file +PdfLoader().load(Path("attention.pdf").read_bytes()) +# You can also use the load_file utility function +PdfLoader().load(load_file("attention.pdf")) + +urllib.request.urlretrieve("https://arxiv.org/pdf/1706.03762.pdf", "CoT.pdf") + +# Load multiple PDF files +PdfLoader().load_collection([Path("attention.pdf").read_bytes(), Path("CoT.pdf").read_bytes()]) +# You can also use the load_files utility function +PdfLoader().load_collection(list(load_files(["attention.pdf", "CoT.pdf"]).values())) diff --git a/docs/griptape-framework/data/src/loaders_10.py b/docs/griptape-framework/data/src/loaders_10.py new file mode 100644 index 000000000..42a64c40e --- /dev/null +++ b/docs/griptape-framework/data/src/loaders_10.py @@ -0,0 +1,10 @@ +from pathlib import Path + +from griptape.loaders import AudioLoader +from griptape.utils import load_file + +# Load an audio file from disk +audio_artifact = AudioLoader().load(Path("tests/resources/sentences.wav").read_bytes()) + +# You can also use the load_file utility function +AudioLoader().load(load_file("tests/resources/sentences.wav")) diff --git a/docs/griptape-framework/data/src/loaders_2.py b/docs/griptape-framework/data/src/loaders_2.py new file mode 100644 index 000000000..b4359e5e5 --- /dev/null +++ b/docs/griptape-framework/data/src/loaders_2.py @@ -0,0 +1,8 @@ +from griptape.drivers import SqlDriver +from griptape.loaders import SqlLoader + 
+SqlLoader(sql_driver=SqlDriver(engine_url="sqlite:///:memory:")).load("SELECT 'foo', 'bar'") + +SqlLoader(sql_driver=SqlDriver(engine_url="sqlite:///:memory:")).load_collection( + ["SELECT 'foo', 'bar';", "SELECT 'fizz', 'buzz';"] +) diff --git a/docs/griptape-framework/data/src/loaders_3.py b/docs/griptape-framework/data/src/loaders_3.py new file mode 100644 index 000000000..35af0fdfc --- /dev/null +++ b/docs/griptape-framework/data/src/loaders_3.py @@ -0,0 +1,16 @@ +from pathlib import Path + +from griptape.loaders import CsvLoader +from griptape.utils import load_file, load_files + +# Load a single CSV file +CsvLoader().load(Path("tests/resources/cities.csv").read_text()) +# You can also use the load_file utility function +CsvLoader().load(load_file("tests/resources/cities.csv")) + +# Load multiple CSV files +CsvLoader().load_collection( + [Path("tests/resources/cities.csv").read_text(), Path("tests/resources/addresses.csv").read_text()] +) +# You can also use the load_files utility function +CsvLoader().load_collection(list(load_files(["tests/resources/cities.csv", "tests/resources/addresses.csv"]).values())) diff --git a/docs/griptape-framework/data/src/loaders_4.py b/docs/griptape-framework/data/src/loaders_4.py new file mode 100644 index 000000000..8d5883adf --- /dev/null +++ b/docs/griptape-framework/data/src/loaders_4.py @@ -0,0 +1,13 @@ +import urllib.request + +import pandas as pd + +from griptape.loaders import DataFrameLoader + +urllib.request.urlretrieve("https://people.sc.fsu.edu/~jburkardt/data/csv/cities.csv", "cities.csv") + +DataFrameLoader().load(pd.read_csv("cities.csv")) + +urllib.request.urlretrieve("https://people.sc.fsu.edu/~jburkardt/data/csv/addresses.csv", "addresses.csv") + +DataFrameLoader().load_collection([pd.read_csv("cities.csv"), pd.read_csv("addresses.csv")]) diff --git a/docs/griptape-framework/data/src/loaders_5.py b/docs/griptape-framework/data/src/loaders_5.py new file mode 100644 index 000000000..0eefda776 --- /dev/null +++ b/docs/griptape-framework/data/src/loaders_5.py @@ -0,0 +1,12 @@ +import urllib.request +from pathlib import Path + +from griptape.loaders import TextLoader + +TextLoader().load("my text") + +urllib.request.urlretrieve("https://example-files.online-convert.com/document/txt/example.txt", "example.txt") + +TextLoader().load(Path("example.txt").read_text()) + +TextLoader().load_collection(["my text", "my other text", Path("example.txt").read_text()]) diff --git a/docs/griptape-framework/data/src/loaders_6.py b/docs/griptape-framework/data/src/loaders_6.py new file mode 100644 index 000000000..b3e68173e --- /dev/null +++ b/docs/griptape-framework/data/src/loaders_6.py @@ -0,0 +1,5 @@ +from griptape.loaders import WebLoader + +WebLoader().load("https://www.griptape.ai") + +WebLoader().load_collection(["https://www.griptape.ai", "https://docs.griptape.ai"]) diff --git a/docs/griptape-framework/data/src/loaders_7.py b/docs/griptape-framework/data/src/loaders_7.py new file mode 100644 index 000000000..6857886e8 --- /dev/null +++ b/docs/griptape-framework/data/src/loaders_7.py @@ -0,0 +1,9 @@ +from pathlib import Path + +from griptape.loaders import ImageLoader +from griptape.utils import load_file + +# Load an image from disk +disk_image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) +# You can also use the load_file utility function +ImageLoader().load(load_file("tests/resources/mountain.png")) diff --git a/docs/griptape-framework/data/src/loaders_8.py b/docs/griptape-framework/data/src/loaders_8.py 
new file mode 100644 index 000000000..e85992d45 --- /dev/null +++ b/docs/griptape-framework/data/src/loaders_8.py @@ -0,0 +1,16 @@ +from pathlib import Path + +from griptape.loaders import ImageLoader +from griptape.utils import load_file, load_files + +# Load a single image in BMP format +image_artifact_jpeg = ImageLoader(format="bmp").load(Path("tests/resources/mountain.png").read_bytes()) +# You can also use the load_file utility function +ImageLoader(format="bmp").load(load_file("tests/resources/mountain.png")) + +# Load multiple images in BMP format +ImageLoader().load_collection( + [Path("tests/resources/mountain.png").read_bytes(), Path("tests/resources/cow.png").read_bytes()] +) +# You can also use the load_files utility function +ImageLoader().load_collection(list(load_files(["tests/resources/mountain.png", "tests/resources/cow.png"]).values())) diff --git a/docs/griptape-framework/data/src/loaders_9.py b/docs/griptape-framework/data/src/loaders_9.py new file mode 100644 index 000000000..d131910d3 --- /dev/null +++ b/docs/griptape-framework/data/src/loaders_9.py @@ -0,0 +1,7 @@ +from griptape.loaders import EmailLoader + +loader = EmailLoader(imap_url="an.email.server.hostname", username="username", password="password") + +loader.load(EmailLoader.EmailQuery(label="INBOX")) + +loader.load_collection([EmailLoader.EmailQuery(label="INBOX"), EmailLoader.EmailQuery(label="SENT")]) diff --git a/docs/griptape-framework/drivers/audio-transcription-drivers.md b/docs/griptape-framework/drivers/audio-transcription-drivers.md index 0fba57438..10630cf22 100644 --- a/docs/griptape-framework/drivers/audio-transcription-drivers.md +++ b/docs/griptape-framework/drivers/audio-transcription-drivers.md @@ -18,22 +18,5 @@ This capability is essential for enhancing accessibility, improving content disc The [OpenAI Audio Transcription Driver](../../reference/griptape/drivers/audio_transcription/openai_audio_transcription_driver.md) utilizes OpenAI's sophisticated `whisper` model to accurately transcribe spoken audio into text. This model supports multiple languages, ensuring precise transcription across a wide range of dialects. ```python -from griptape.drivers import OpenAiAudioTranscriptionDriver -from griptape.engines import AudioTranscriptionEngine -from griptape.tools.audio_transcription_client.tool import AudioTranscriptionClient -from griptape.structures import Agent - - -driver = OpenAiAudioTranscriptionDriver( - model="whisper-1" -) - -tool = AudioTranscriptionClient( - off_prompt=False, - engine=AudioTranscriptionEngine( - audio_transcription_driver=driver, - ), -) - -Agent(tools=[tool]).run("Transcribe the following audio file: tests/resources/sentences.wav") +--8<-- "docs/griptape-framework/drivers/src/audio_transcription_drivers_1.py" ``` diff --git a/docs/griptape-framework/drivers/conversation-memory-drivers.md b/docs/griptape-framework/drivers/conversation-memory-drivers.md index acdb7c202..bb4c1b35a 100644 --- a/docs/griptape-framework/drivers/conversation-memory-drivers.md +++ b/docs/griptape-framework/drivers/conversation-memory-drivers.md @@ -9,20 +9,20 @@ You can persist and load memory by using Conversation Memory Drivers. You can bu ## Conversation Memory Drivers -### Local +### Griptape Cloud -The [LocalConversationMemoryDriver](../../reference/griptape/drivers/memory/conversation/local_conversation_memory_driver.md) allows you to persist Conversation Memory in a local JSON file. 
+The [GriptapeCloudConversationMemoryDriver](../../reference/griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.md) allows you to persist Conversation Memory in Griptape Cloud. It provides seamless integration with Griptape's cloud-based `Threads` and `Messages` resources. ```python -from griptape.structures import Agent -from griptape.drivers import LocalConversationMemoryDriver -from griptape.memory.structure import ConversationMemory +--8<-- "docs/griptape-framework/drivers/src/conversation_memory_drivers_griptape_cloud.py" +``` + +### Local -local_driver = LocalConversationMemoryDriver(file_path="memory.json") -agent = Agent(conversation_memory=ConversationMemory(driver=local_driver)) +The [LocalConversationMemoryDriver](../../reference/griptape/drivers/memory/conversation/local_conversation_memory_driver.md) allows you to persist Conversation Memory in a local JSON file. -agent.run("Surfing is my favorite sport.") -agent.run("What is my favorite sport?") +```python +--8<-- "docs/griptape-framework/drivers/src/conversation_memory_drivers_1.py" ``` ### Amazon DynamoDb @@ -33,24 +33,7 @@ agent.run("What is my favorite sport?") The [AmazonDynamoDbConversationMemoryDriver](../../reference/griptape/drivers/memory/conversation/amazon_dynamodb_conversation_memory_driver.md) allows you to persist Conversation Memory in [Amazon DynamoDb](https://aws.amazon.com/dynamodb/). ```python -import os -import uuid -from griptape.drivers import AmazonDynamoDbConversationMemoryDriver -from griptape.memory.structure import ConversationMemory -from griptape.structures import Agent - -conversation_id = uuid.uuid4().hex -dynamodb_driver = AmazonDynamoDbConversationMemoryDriver( - table_name=os.environ["DYNAMODB_TABLE_NAME"], - partition_key="id", - value_attribute_key="memory", - partition_key_value=conversation_id, -) - -agent = Agent(conversation_memory=ConversationMemory(driver=dynamodb_driver)) - -agent.run("My name is Jeff.") -agent.run("What is my name?") +--8<-- "docs/griptape-framework/drivers/src/conversation_memory_drivers_2.py" ``` Optional parameters `sort_key` and `sort_key_value` can be supplied for tables with a composite primary key. @@ -63,23 +46,6 @@ Optional parameters `sort_key` and `sort_key_value` can be supplied for tables w The [RedisConversationMemoryDriver](../../reference/griptape/drivers/memory/conversation/redis_conversation_memory_driver.md) allows you to persist Conversation Memory in [Redis](https://redis.io/). 
```python -import os -import uuid -from griptape.drivers import RedisConversationMemoryDriver -from griptape.memory.structure import ConversationMemory -from griptape.structures import Agent - -conversation_id = uuid.uuid4().hex -redis_conversation_driver = RedisConversationMemoryDriver( - host=os.environ["REDIS_HOST"], - port=os.environ["REDIS_PORT"], - password=os.environ["REDIS_PASSWORD"], - index=os.environ["REDIS_INDEX"], - conversation_id = conversation_id -) - -agent = Agent(conversation_memory=ConversationMemory(driver=redis_conversation_driver)) - -agent.run("My name is Jeff.") -agent.run("What is my name?") +--8<-- "docs/griptape-framework/drivers/src/conversation_memory_drivers_3.py" ``` + diff --git a/docs/griptape-framework/drivers/embedding-drivers.md b/docs/griptape-framework/drivers/embedding-drivers.md index 567aa13e4..68f40f09e 100644 --- a/docs/griptape-framework/drivers/embedding-drivers.md +++ b/docs/griptape-framework/drivers/embedding-drivers.md @@ -21,12 +21,7 @@ The [OpenAiEmbeddingDriver](../../reference/griptape/drivers/embedding/openai_em ```python -from griptape.drivers import OpenAiEmbeddingDriver - -embeddings = OpenAiEmbeddingDriver().embed_string("Hello Griptape!") - -# display the first 3 embeddings -print(embeddings[:3]) +--8<-- "docs/griptape-framework/drivers/src/embedding_drivers_1.py" ``` ``` [0.0017853748286142945, 0.006118456833064556, -0.005811543669551611] @@ -37,18 +32,8 @@ print(embeddings[:3]) Many services such as [LMStudio](https://lmstudio.ai/) and [OhMyGPT](https://www.ohmygpt.com/) provide OpenAI-compatible APIs. You can use the [OpenAiEmbeddingDriver](../../reference/griptape/drivers/embedding/openai_embedding_driver.md) to interact with these services. Simply set the `base_url` to the service's API endpoint and the `model` to the model name. If the service requires an API key, you can set it in the `api_key` field. -```python title="PYTEST_IGNORE" -from griptape.drivers import OpenAiEmbeddingDriver - -embedding_driver = OpenAiEmbeddingDriver( - base_url="http://127.0.0.1:1234/v1", - model="nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.Q2_K", -) - -embeddings = embedding_driver.embed_string("Hello world!") - -# display the first 3 embeddings -print(embeddings[:3]) +```python +--8<-- "docs/griptape-framework/drivers/src/embedding_drivers_2.py" ``` !!! tip @@ -67,12 +52,7 @@ with updated defaults. The [AmazonBedrockTitanEmbeddingDriver](../../reference/griptape/drivers/embedding/amazon_bedrock_titan_embedding_driver.md) uses the [Amazon Bedrock Embeddings API](https://docs.aws.amazon.com/bedrock/latest/userguide/embeddings.html). ```python -from griptape.drivers import AmazonBedrockTitanEmbeddingDriver - -embeddings = AmazonBedrockTitanEmbeddingDriver().embed_string("Hello world!") - -# display the first 3 embeddings -print(embeddings[:3]) +--8<-- "docs/griptape-framework/drivers/src/embedding_drivers_3.py" ``` ``` [-0.234375, -0.024902344, -0.14941406] @@ -85,12 +65,7 @@ print(embeddings[:3]) The [GoogleEmbeddingDriver](../../reference/griptape/drivers/embedding/google_embedding_driver.md) uses the [Google Embeddings API](https://ai.google.dev/tutorials/python_quickstart#use_embeddings). 
```python -from griptape.drivers import GoogleEmbeddingDriver - -embeddings = GoogleEmbeddingDriver().embed_string("Hello world!") - -# display the first 3 embeddings -print(embeddings[:3]) +--8<-- "docs/griptape-framework/drivers/src/embedding_drivers_4.py" ``` ``` [0.0588633, 0.0033929371, -0.072810836] @@ -106,24 +81,7 @@ The [HuggingFaceHubEmbeddingDriver](../../reference/griptape/drivers/embedding/h - feature-extraction ```python -import os -from griptape.drivers import HuggingFaceHubEmbeddingDriver -from griptape.tokenizers import HuggingFaceTokenizer -from transformers import AutoTokenizer - -driver = HuggingFaceHubEmbeddingDriver( - api_token=os.environ["HUGGINGFACE_HUB_ACCESS_TOKEN"], - model="sentence-transformers/all-MiniLM-L6-v2", - tokenizer=HuggingFaceTokenizer( - model="sentence-transformers/all-MiniLM-L6-v2", - max_output_tokens=512, - ), -) - -embeddings = driver.embed_string("Hello world!") - -# display the first 3 embeddings -print(embeddings[:3]) +--8<-- "docs/griptape-framework/drivers/src/embedding_drivers_5.py" ``` ### Ollama @@ -133,17 +91,8 @@ print(embeddings[:3]) The [OllamaEmbeddingDriver](../../reference/griptape/drivers/embedding/ollama_embedding_driver.md) uses the [Ollama Embeddings API](https://ollama.com/blog/embedding-models). -```python title="PYTEST_IGNORE" -from griptape.drivers import OllamaEmbeddingDriver - -driver = OllamaEmbeddingDriver( - model="all-minilm", -) - -results = driver.embed_string("Hello world!") - -# display the first 3 embeddings -print(results[:3]) +```python +--8<-- "docs/griptape-framework/drivers/src/embedding_drivers_6.py" ``` ### Amazon SageMaker Jumpstart @@ -153,18 +102,8 @@ The [AmazonSageMakerJumpstartEmbeddingDriver](../../reference/griptape/drivers/e !!! info This driver requires the `drivers-embedding-amazon-sagemaker` [extra](../index.md#extras). -```python title="PYTEST_IGNORE" -import os -from griptape.drivers import AmazonSageMakerJumpstartEmbeddingDriver, SageMakerTensorFlowHubEmbeddingModelDriver - -driver = AmazonSageMakerJumpstartEmbeddingDriver( - model=os.environ["SAGEMAKER_TENSORFLOW_HUB_MODEL"], -) - -embeddings = driver.embed_string("Hello world!") - -# display the first 3 embeddings -print(embeddings[:3]) +```python +--8<-- "docs/griptape-framework/drivers/src/embedding_drivers_7.py" ``` ### VoyageAI @@ -174,17 +113,7 @@ The [VoyageAiEmbeddingDriver](../../reference/griptape/drivers/embedding/voyagea This driver requires the `drivers-embedding-voyageai` [extra](../index.md#extras). ```python -import os -from griptape.drivers import VoyageAiEmbeddingDriver - -driver = VoyageAiEmbeddingDriver( - api_key=os.environ["VOYAGE_API_KEY"] -) - -embeddings = driver.embed_string("Hello world!") - -# display the first 3 embeddings -print(embeddings[:3]) +--8<-- "docs/griptape-framework/drivers/src/embedding_drivers_8.py" ``` ### Cohere @@ -195,40 +124,12 @@ The [CohereEmbeddingDriver](../../reference/griptape/drivers/embedding/cohere_em This driver requires the `drivers-embedding-cohere` [extra](../index.md#extras). 
```python -import os -from griptape.drivers import CohereEmbeddingDriver - -embedding_driver=CohereEmbeddingDriver( - model="embed-english-v3.0", - api_key=os.environ["COHERE_API_KEY"], - input_type="search_document", -) - -embeddings = embedding_driver.embed_string("Hello world!") - -# display the first 3 embeddings -print(embeddings[:3]) +--8<-- "docs/griptape-framework/drivers/src/embedding_drivers_9.py" ``` ### Override Default Structure Embedding Driver -Here is how you can override the Embedding Driver that is used by default in Structures. +Here is how you can override the Embedding Driver that is used by default in Structures. ```python -from griptape.structures import Agent -from griptape.tools import WebScraper, TaskMemoryClient -from griptape.drivers import ( - OpenAiChatPromptDriver, - VoyageAiEmbeddingDriver, -) -from griptape.config import StructureConfig - -agent = Agent( - tools=[WebScraper(off_prompt=True), TaskMemoryClient(off_prompt=False)], - config=StructureConfig( - prompt_driver=OpenAiChatPromptDriver(model="gpt-4o"), - embedding_driver=VoyageAiEmbeddingDriver(), - ), -) - -agent.run("based on https://www.griptape.ai/, tell me what Griptape is") +--8<-- "docs/griptape-framework/drivers/src/embedding_drivers_10.py" ``` diff --git a/docs/griptape-framework/drivers/event-listener-drivers.md b/docs/griptape-framework/drivers/event-listener-drivers.md index 73453afb6..ab0609c51 100644 --- a/docs/griptape-framework/drivers/event-listener-drivers.md +++ b/docs/griptape-framework/drivers/event-listener-drivers.md @@ -10,61 +10,13 @@ Event Listener Drivers are used to send Griptape [Events](../misc/events.md) to You can instantiate Drivers and pass them to Event Listeners in your Structure: ```python -import os - -from griptape.drivers import AmazonSqsEventListenerDriver -from griptape.events import ( - EventListener, -) -from griptape.rules import Rule -from griptape.structures import Agent - -agent = Agent( - rules=[ - Rule( - value="You will be provided with a block of text, and your task is to extract a list of keywords from it." - ) - ], - event_listeners=[ - EventListener( - handler=lambda event: { # You can optionally use the handler to transform the event payload before sending it to the Driver - "event": event.to_dict(), - }, - driver=AmazonSqsEventListenerDriver( - queue_url=os.environ["AMAZON_SQS_QUEUE_URL"], - ), - ), - ], -) - -agent.run( - """Black-on-black ware is a 20th- and 21st-century pottery tradition developed by the Puebloan Native American ceramic artists in Northern New Mexico. - Traditional reduction-fired blackware has been made for centuries by pueblo artists. - Black-on-black ware of the past century is produced with a smooth surface, with the designs applied through selective burnishing or the application of refractory slip. - Another style involves carving or incising designs and selectively polishing the raised areas. - For generations several families from Kha'po Owingeh and P'ohwhóge Owingeh pueblos have been making black-on-black ware with the techniques passed down from matriarch potters. Artists from other pueblos have also produced black-on-black ware. 
- Several contemporary artists have created works honoring the pottery of their ancestors.""" -) +--8<-- "docs/griptape-framework/drivers/src/event_listener_drivers_1.py" ``` Or use them independently: ```python -import os -from griptape.drivers import GriptapeCloudEventListenerDriver -from griptape.events import FinishStructureRunEvent -from griptape.artifacts import TextArtifact - -# By default, GriptapeCloudEventListenerDriver uses the api key provided -# in the GT_CLOUD_API_KEY environment variable. -event_driver = GriptapeCloudEventListenerDriver() - -done_event = FinishStructureRunEvent( - output_task_input=TextArtifact("Just started!"), - output_task_output=TextArtifact("All done!"), -) - -event_driver.publish_event(done_event) +--8<-- "docs/griptape-framework/drivers/src/event_listener_drivers_2.py" ``` ## Event Listener Drivers @@ -79,38 +31,7 @@ Griptape offers the following Event Listener Drivers for forwarding Griptape Eve The [AmazonSqsEventListenerDriver](../../reference/griptape/drivers/event_listener/amazon_sqs_event_listener_driver.md) sends Events to an [Amazon SQS](https://aws.amazon.com/sqs/) queue. ```python -import os - -from griptape.drivers import AmazonSqsEventListenerDriver -from griptape.events import ( - EventListener, -) -from griptape.rules import Rule -from griptape.structures import Agent - -agent = Agent( - rules=[ - Rule( - value="You will be provided with a block of text, and your task is to extract a list of keywords from it." - ) - ], - event_listeners=[ - EventListener( - driver=AmazonSqsEventListenerDriver( - queue_url=os.environ["AMAZON_SQS_QUEUE_URL"], - ), - ), - ], -) - -agent.run( - """Black-on-black ware is a 20th- and 21st-century pottery tradition developed by the Puebloan Native American ceramic artists in Northern New Mexico. - Traditional reduction-fired blackware has been made for centuries by pueblo artists. - Black-on-black ware of the past century is produced with a smooth surface, with the designs applied through selective burnishing or the application of refractory slip. - Another style involves carving or incising designs and selectively polishing the raised areas. - For generations several families from Kha'po Owingeh and P'ohwhóge Owingeh pueblos have been making black-on-black ware with the techniques passed down from matriarch potters. Artists from other pueblos have also produced black-on-black ware. - Several contemporary artists have created works honoring the pottery of their ancestors.""" -) +--8<-- "docs/griptape-framework/drivers/src/event_listener_drivers_3.py" ``` ### AWS IoT @@ -121,40 +42,7 @@ agent.run( The [AwsIotCoreEventListenerDriver](../../reference/griptape/drivers/event_listener/aws_iot_core_event_listener_driver.md) sends Events to the [AWS IoT Message Broker](https://aws.amazon.com/iot-core/). ```python -import os - -from griptape.config import StructureConfig -from griptape.drivers import AwsIotCoreEventListenerDriver, OpenAiChatPromptDriver -from griptape.events import ( - EventListener, - FinishStructureRunEvent, -) -from griptape.rules import Rule -from griptape.structures import Agent - -agent = Agent( - rules=[ - Rule( - value="You will be provided with a text, and your task is to extract the airport codes from it." 
- ) - ], - config=StructureConfig( - prompt_driver=OpenAiChatPromptDriver( - model="gpt-3.5-turbo", temperature=0.7 - ) - ), - event_listeners=[ - EventListener( - event_types=[FinishStructureRunEvent], - driver=AwsIotCoreEventListenerDriver( - topic=os.environ["AWS_IOT_CORE_TOPIC"], - iot_endpoint=os.environ["AWS_IOT_CORE_ENDPOINT"], - ), - ), - ], -) - -agent.run("I want to fly from Orlando to Boston") +--8<-- "docs/griptape-framework/drivers/src/event_listener_drivers_4.py" ``` ### Griptape Cloud @@ -165,29 +53,7 @@ The [GriptapeCloudEventListenerDriver](../../reference/griptape/drivers/event_li This Driver is required when using the Griptape Cloud Managed Structures feature. For local development, you can use the [Skatepark Emulator](https://github.com/griptape-ai/griptape-cli?tab=readme-ov-file#skatepark-emulator). ```python -import os - -from griptape.drivers import GriptapeCloudEventListenerDriver -from griptape.events import ( - EventListener, - FinishStructureRunEvent, -) -from griptape.structures import Agent - -agent = Agent( - event_listeners=[ - EventListener( - event_types=[FinishStructureRunEvent], - # By default, GriptapeCloudEventListenerDriver uses the api key provided - # in the GT_CLOUD_API_KEY environment variable. - driver=GriptapeCloudEventListenerDriver(), - ), - ], -) - -agent.run( - "Create a list of 8 questions for an interview with a science fiction author." -) +--8<-- "docs/griptape-framework/drivers/src/event_listener_drivers_5.py" ``` ### Webhook Event Listener Driver @@ -195,27 +61,7 @@ agent.run( The [WebhookEventListenerDriver](../../reference/griptape/drivers/event_listener/webhook_event_listener_driver.md) sends Events to any [Webhook](https://en.wikipedia.org/wiki/Webhook) URL. ```python -import os - -from griptape.drivers import WebhookEventListenerDriver -from griptape.events import ( - EventListener, - FinishStructureRunEvent, -) -from griptape.structures import Agent - -agent = Agent( - event_listeners=[ - EventListener( - event_types=[FinishStructureRunEvent], - driver=WebhookEventListenerDriver( - webhook_url=os.environ["WEBHOOK_URL"], - ), - ), - ], -) - -agent.run("Analyze the pros and cons of remote work vs. office work") +--8<-- "docs/griptape-framework/drivers/src/event_listener_drivers_6.py" ``` ### Pusher @@ -225,31 +71,5 @@ agent.run("Analyze the pros and cons of remote work vs. office work") The [PusherEventListenerDriver](../../reference/griptape/drivers/event_listener/pusher_event_listener_driver.md) sends Events to [Pusher](https://pusher.com). ```python -import os -from griptape.drivers import PusherEventListenerDriver -from griptape.events import ( - EventListener, - FinishStructureRunEvent -) -from griptape.structures import Agent - -agent = Agent( - event_listeners=[ - EventListener( - event_types=[FinishStructureRunEvent], - driver=PusherEventListenerDriver( - batched=False, - app_id=os.environ["PUSHER_APP_ID"], - key=os.environ["PUSHER_KEY"], - secret=os.environ["PUSHER_SECRET"], - cluster=os.environ["PUSHER_CLUSTER"], - channel='my-channel', - event_name='my-event' - ), - ), - ], -) - -agent.run("Analyze the pros and cons of remote work vs. 
office work") - +--8<-- "docs/griptape-framework/drivers/src/event_listener_drivers_7.py" ``` diff --git a/docs/griptape-framework/drivers/image-generation-drivers.md b/docs/griptape-framework/drivers/image-generation-drivers.md index aae25c64d..bcc91aca6 100644 --- a/docs/griptape-framework/drivers/image-generation-drivers.md +++ b/docs/griptape-framework/drivers/image-generation-drivers.md @@ -10,22 +10,7 @@ search: Provide a Driver when building an [Engine](../engines/image-generation-engines.md), then pass it to a [Tool](../tools/index.md) for use by an [Agent](../structures/agents.md): ```python -from griptape.structures import Agent -from griptape.engines import PromptImageGenerationEngine -from griptape.drivers import OpenAiImageGenerationDriver -from griptape.tools import PromptImageGenerationClient - -driver = OpenAiImageGenerationDriver( - model="dall-e-2", -) - -engine = PromptImageGenerationEngine(image_generation_driver=driver) - -agent = Agent(tools=[ - PromptImageGenerationClient(engine=engine), -]) - -agent.run("Generate a watercolor painting of a dog riding a skateboard") +--8<-- "docs/griptape-framework/drivers/src/image_generation_drivers_1.py" ``` ## Image Generation Drivers @@ -41,28 +26,7 @@ The [Bedrock Stable Diffusion Model Driver](../../reference/griptape/drivers/ima This Model Driver supports negative prompts. When provided (for example, when used with an [image generation Engine](../engines/image-generation-engines.md) configured with [Negative Rulesets](../engines/image-generation-engines.md#image-generation-engine-rulesets)), the image generation request will include negatively-weighted prompts describing features or characteristics to avoid in the resulting generation. ```python -from griptape.structures import Agent -from griptape.tools import PromptImageGenerationClient -from griptape.engines import PromptImageGenerationEngine -from griptape.drivers import AmazonBedrockImageGenerationDriver, \ - BedrockStableDiffusionImageGenerationModelDriver - -model_driver = BedrockStableDiffusionImageGenerationModelDriver( - style_preset="pixel-art", -) - -driver = AmazonBedrockImageGenerationDriver( - image_generation_model_driver=model_driver, - model="stability.stable-diffusion-xl-v0", -) - -engine = PromptImageGenerationEngine(image_generation_driver=driver) - -agent = Agent(tools=[ - PromptImageGenerationClient(engine=engine), -]) - -agent.run("Generate an image of a dog riding a skateboard") +--8<-- "docs/griptape-framework/drivers/src/image_generation_drivers_2.py" ``` #### Titan @@ -72,26 +36,7 @@ The [Bedrock Titan Image Generator Model Driver](../../reference/griptape/driver This Model Driver supports negative prompts. When provided (for example, when used with an [image generation engine](../engines/image-generation-engines.md) configured with [Negative Rulesets](../engines/image-generation-engines.md#image-generation-engine-rulesets)), the image generation request will include negatively-weighted prompts describing features or characteristics to avoid in the resulting generation. 
```python -from griptape.structures import Agent -from griptape.tools import PromptImageGenerationClient -from griptape.engines import PromptImageGenerationEngine -from griptape.drivers import AmazonBedrockImageGenerationDriver, \ - BedrockTitanImageGenerationModelDriver - -model_driver = BedrockTitanImageGenerationModelDriver() - -driver = AmazonBedrockImageGenerationDriver( - image_generation_model_driver=model_driver, - model="amazon.titan-image-generator-v1", -) - -engine = PromptImageGenerationEngine(image_generation_driver=driver) - -agent = Agent(tools=[ - PromptImageGenerationClient(engine=engine), -]) - -agent.run("Generate a watercolor painting of a dog riding a skateboard") +--8<-- "docs/griptape-framework/drivers/src/image_generation_drivers_3.py" ``` ### Azure OpenAI @@ -99,27 +44,7 @@ agent.run("Generate a watercolor painting of a dog riding a skateboard") The [Azure OpenAI Image Generation Driver](../../reference/griptape/drivers/image_generation/azure_openai_image_generation_driver.md) provides access to OpenAI models hosted by Azure. In addition to the configurations provided by the underlying OpenAI Driver, the Azure OpenAI Driver allows configuration of Azure-specific deployment values. ```python -import os - -from griptape.structures import Agent -from griptape.tools import PromptImageGenerationClient -from griptape.engines import PromptImageGenerationEngine -from griptape.drivers import AzureOpenAiImageGenerationDriver - -driver = AzureOpenAiImageGenerationDriver( - model="dall-e-3", - azure_deployment=os.environ["AZURE_OPENAI_DALL_E_3_DEPLOYMENT_ID"], - azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT_2"], - api_key=os.environ["AZURE_OPENAI_API_KEY_2"], -) - -engine = PromptImageGenerationEngine(image_generation_driver=driver) - -agent = Agent(tools=[ - PromptImageGenerationClient(engine=engine), -]) - -agent.run("Generate a watercolor painting of a dog riding a skateboard") +--8<-- "docs/griptape-framework/drivers/src/image_generation_drivers_4.py" ``` ### Leonardo.Ai @@ -131,27 +56,7 @@ This Driver supports configurations like model selection, image size, specifying This Driver supports negative prompts. When provided (for example, when used with an [image generation engine](../engines/image-generation-engines.md) configured with [Negative Rulesets](../engines/image-generation-engines.md#image-generation-engine-rulesets)), the image generation request will include negatively-weighted prompts describing features or characteristics to avoid in the resulting generation. ```python -import os - -from griptape.structures import Agent -from griptape.tools import PromptImageGenerationClient -from griptape.engines import PromptImageGenerationEngine -from griptape.drivers import LeonardoImageGenerationDriver - -driver = LeonardoImageGenerationDriver( - model=os.environ["LEONARDO_MODEL_ID"], - api_key=os.environ["LEONARDO_API_KEY"], - image_width=512, - image_height=1024, -) - -engine = PromptImageGenerationEngine(image_generation_driver=driver) - -agent = Agent(tools=[ - PromptImageGenerationClient(engine=engine), -]) - -agent.run("Generate a watercolor painting of a dog riding a skateboard") +--8<-- "docs/griptape-framework/drivers/src/image_generation_drivers_5.py" ``` ### OpenAI @@ -161,23 +66,7 @@ The [OpenAI Image Generation Driver](../../reference/griptape/drivers/image_gene This Driver supports image generation configurations like style presets, image quality preference, and image size. 
For details on supported configuration values, see the [OpenAI documentation](https://platform.openai.com/docs/guides/images/introduction). ```python -from griptape.structures import Agent -from griptape.tools import PromptImageGenerationClient -from griptape.engines import PromptImageGenerationEngine -from griptape.drivers import OpenAiImageGenerationDriver - -driver = OpenAiImageGenerationDriver( - model="dall-e-2", - image_size="512x512", -) - -engine = PromptImageGenerationEngine(image_generation_driver=driver) - -agent = Agent(tools=[ - PromptImageGenerationClient(engine=engine), -]) - -agent.run("Generate a watercolor painting of a dog riding a skateboard") +--8<-- "docs/griptape-framework/drivers/src/image_generation_drivers_6.py" ``` ### HuggingFace Pipelines @@ -204,29 +93,8 @@ The [Stable Diffusion 3 Image Generation Pipeline Driver](../../reference/gripta Image generation consumes substantial memory. On devices with limited VRAM, it may be necessary to enable the `enable_model_cpu_offload` or `drop_t5_encoder` configurations. For more information, see [HuggingFace's documentation](https://huggingface.co/docs/diffusers/en/optimization/memory) on reduced memory usage. -```python title="PYTEST_IGNORE" -from griptape.structures import Pipeline -from griptape.tasks import PromptImageGenerationTask -from griptape.engines import PromptImageGenerationEngine -from griptape.drivers import HuggingFacePipelineImageGenerationDriver, \ - StableDiffusion3ImageGenerationPipelineDriver -from griptape.artifacts import TextArtifact - -image_generation_task = PromptImageGenerationTask( - input=TextArtifact("landscape photograph, verdant, countryside, 8k"), - image_generation_engine=PromptImageGenerationEngine( - image_generation_driver=HuggingFacePipelineImageGenerationDriver( - model="stabilityai/stable-diffusion-3-medium-diffusers", - device="cuda", - pipeline_driver=StableDiffusion3ImageGenerationPipelineDriver( - height=512, - width=512, - ) - ) - ) -) - -output_artifact = Pipeline(tasks=[image_generation_task]).run().output +```python +--8<-- "docs/griptape-framework/drivers/src/image_generation_drivers_7.py" ``` #### Stable Diffusion 3 Img2Img Image Generation Pipeline Driver @@ -236,35 +104,8 @@ output_artifact = Pipeline(tasks=[image_generation_task]).run().output The [Stable Diffusion 3 Img2Img Image Generation Pipeline Driver](../../reference/griptape/drivers/image_generation_pipeline/stable_diffusion_3_img_2_img_image_generation_pipeline_driver.md) provides a `StableDiffusion3Img2ImgPipeline` for image-to-image generations, accepting a text prompt and input image. This Driver accepts a text prompt, an input image, and configurations including Stable Diffusion 3 model, output image size, inference steps, generation seed, and strength of generation over the input image. 
-```python title="PYTEST_IGNORE" -from pathlib import Path - -from griptape.structures import Pipeline -from griptape.tasks import VariationImageGenerationTask -from griptape.engines import VariationImageGenerationEngine -from griptape.drivers import HuggingFacePipelineImageGenerationDriver, \ - StableDiffusion3Img2ImgImageGenerationPipelineDriver -from griptape.artifacts import TextArtifact, ImageArtifact -from griptape.loaders import ImageLoader - -prompt_artifact = TextArtifact("landscape photograph, verdant, countryside, 8k") -input_image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) - -image_variation_task = VariationImageGenerationTask( - input=(prompt_artifact, input_image_artifact), - image_generation_engine=PromptImageGenerationEngine( - image_generation_driver=HuggingFacePipelineImageGenerationDriver( - model="stabilityai/stable-diffusion-3-medium-diffusers", - device="cuda", - pipeline_driver=StableDiffusion3Img2ImgImageGenerationPipelineDriver( - height=1024, - width=1024, - ) - ) - ) -) - -output_artifact = Pipeline(tasks=[image_variation_task]).run().output +```python +--8<-- "docs/griptape-framework/drivers/src/image_generation_drivers_8.py" ``` #### StableDiffusion3ControlNetImageGenerationPipelineDriver @@ -274,35 +115,6 @@ output_artifact = Pipeline(tasks=[image_variation_task]).run().output The [StableDiffusion3ControlNetImageGenerationPipelineDriver](../../reference/griptape/drivers/image_generation_pipeline/stable_diffusion_3_controlnet_image_generation_pipeline_driver.md) provides a `StableDiffusion3ControlNetPipeline` for image-to-image generations, accepting a text prompt and a control image. This Driver accepts a text prompt, a control image, and configurations including Stable Diffusion 3 model, ControlNet model, output image size, generation seed, inference steps, and the degree to which the model adheres to the control image. 
-```python title="PYTEST_IGNORE" -from pathlib import Path - -from griptape.structures import Pipeline -from griptape.tasks import VariationImageGenerationTask -from griptape.engines import VariationImageGenerationEngine -from griptape.drivers import HuggingFacePipelineImageGenerationDriver, \ - StableDiffusion3ControlNetImageGenerationPipelineDriver -from griptape.artifacts import TextArtifact, ImageArtifact -from griptape.loaders import ImageLoader - -prompt_artifact = TextArtifact("landscape photograph, verdant, countryside, 8k") -control_image_artifact = ImageLoader().load(Path("canny_control_image.png").read_bytes()) - -controlnet_task = VariationImageGenerationTask( - input=(prompt_artifact, control_image_artifact), - image_generation_engine=PromptImageGenerationEngine( - image_generation_driver=HuggingFacePipelineImageGenerationDriver( - model="stabilityai/stable-diffusion-3-medium-diffusers", - device="cuda", - pipeline_driver=StableDiffusion3ControlNetImageGenerationPipelineDriver( - controlnet_model="InstantX/SD3-Controlnet-Canny", - control_strength=0.8, - height=768, - width=1024, - ) - ) - ) -) - -output_artifact = Pipeline(tasks=[controlnet_task]).run().output +```python +--8<-- "docs/griptape-framework/drivers/src/image_generation_drivers_9.py" ``` diff --git a/docs/griptape-framework/drivers/image-query-drivers.md b/docs/griptape-framework/drivers/image-query-drivers.md index 04e3ebaee..b0c598572 100644 --- a/docs/griptape-framework/drivers/image-query-drivers.md +++ b/docs/griptape-framework/drivers/image-query-drivers.md @@ -20,50 +20,13 @@ Image Query Drivers are used by [Image Query Engines](../engines/image-query-eng The [AnthropicImageQueryDriver](../../reference/griptape/drivers/image_query/anthropic_image_query_driver.md) is used to query images using Anthropic's Claude 3 multi-modal model. Here is an example of how to use it: ```python -from griptape.drivers import AnthropicImageQueryDriver -from griptape.engines import ImageQueryEngine -from griptape.loaders import ImageLoader - -driver = AnthropicImageQueryDriver( - model="claude-3-sonnet-20240229", - max_tokens=1024, -) - -engine = ImageQueryEngine( - image_query_driver=driver, -) - -with open("tests/resources/mountain.png", "rb") as f: - image_artifact = ImageLoader().load(f.read()) - -engine.run("Describe the weather in the image", [image_artifact]) +--8<-- "docs/griptape-framework/drivers/src/image_query_drivers_1.py" ``` You can also specify multiple images with a single text prompt. This applies the same text prompt to all images specified, up to a max of 20. However, you will still receive one text response from the model currently. 
```python -from griptape.drivers import AnthropicImageQueryDriver -from griptape.engines import ImageQueryEngine -from griptape.loaders import ImageLoader - -driver = AnthropicImageQueryDriver( - model="claude-3-sonnet-20240229", - max_tokens=1024, -) - -engine = ImageQueryEngine( - image_query_driver=driver, -) - -with open("tests/resources/mountain.png", "rb") as f: - image_artifact1 = ImageLoader().load(f.read()) - -with open("tests/resources/cow.png", "rb") as f: - image_artifact2 = ImageLoader().load(f.read()) - -result = engine.run("Describe the weather in the image", [image_artifact1, image_artifact2]) - -print(result) +--8<-- "docs/griptape-framework/drivers/src/image_query_drivers_2.py" ``` ### OpenAI @@ -74,23 +37,7 @@ print(result) The [OpenAiVisionImageQueryDriver](../../reference/griptape/drivers/image_query/openai_image_query_driver.md) is used to query images using the OpenAI Vision API. Here is an example of how to use it: ```python -from griptape.drivers import OpenAiImageQueryDriver -from griptape.engines import ImageQueryEngine -from griptape.loaders import ImageLoader - -driver = OpenAiImageQueryDriver( - model="gpt-4o", - max_tokens=256, -) - -engine = ImageQueryEngine( - image_query_driver=driver, -) - -with open("tests/resources/mountain.png", "rb") as f: - image_artifact = ImageLoader().load(f.read()) - -engine.run("Describe the weather in the image", [image_artifact]) +--8<-- "docs/griptape-framework/drivers/src/image_query_drivers_3.py" ``` ### Azure OpenAI @@ -101,27 +48,7 @@ engine.run("Describe the weather in the image", [image_artifact]) The [AzureOpenAiVisionImageQueryDriver](../../reference/griptape/drivers/image_query/azure_openai_image_query_driver.md) is used to query images using the Azure OpenAI Vision API. Here is an example of how to use it: ```python -import os -from griptape.drivers import AzureOpenAiImageQueryDriver -from griptape.engines import ImageQueryEngine -from griptape.loaders import ImageLoader - -driver = AzureOpenAiImageQueryDriver( - azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT_2"], - api_key=os.environ["AZURE_OPENAI_API_KEY_2"], - model="gpt-4o", - azure_deployment="gpt-4o", - max_tokens=256, -) - -engine = ImageQueryEngine( - image_query_driver=driver, -) - -with open("tests/resources/mountain.png", "rb") as f: - image_artifact = ImageLoader().load(f.read()) - -engine.run("Describe the weather in the image", [image_artifact]) +--8<-- "docs/griptape-framework/drivers/src/image_query_drivers_4.py" ``` ### Amazon Bedrock @@ -133,30 +60,5 @@ The [Amazon Bedrock Image Query Driver](../../reference/griptape/drivers/image_q The [BedrockClaudeImageQueryModelDriver](../../reference/griptape/drivers/image_query_model/bedrock_claude_image_query_model_driver.md) provides support for Claude models hosted by Bedrock. 
```python -from griptape.drivers import AmazonBedrockImageQueryDriver, BedrockClaudeImageQueryModelDriver -from griptape.engines import ImageQueryEngine -from griptape.loaders import ImageLoader -import boto3 - -session = boto3.Session( - region_name="us-west-2" -) - -driver = AmazonBedrockImageQueryDriver( - image_query_model_driver=BedrockClaudeImageQueryModelDriver(), - model="anthropic.claude-3-sonnet-20240229-v1:0", - session=session -) - -engine = ImageQueryEngine( - image_query_driver=driver -) - -with open("tests/resources/mountain.png", "rb") as f: - image_artifact = ImageLoader().load(f.read()) - - -result = engine.run("Describe the weather in the image", [image_artifact]) - -print(result) +--8<-- "docs/griptape-framework/drivers/src/image_query_drivers_5.py" ``` diff --git a/docs/griptape-framework/drivers/observability-drivers.md b/docs/griptape-framework/drivers/observability-drivers.md index fbe1a1319..701aca504 100644 --- a/docs/griptape-framework/drivers/observability-drivers.md +++ b/docs/griptape-framework/drivers/observability-drivers.md @@ -28,17 +28,8 @@ The Griptape Cloud Observability Driver instruments `@observable` functions and Here is an example of how to use the `GriptapeCloudObservabilityDriver` with the `Observability` context manager to send the telemetry to Griptape Cloud: -```python title="PYTEST_IGNORE" -from griptape.drivers import GriptapeCloudObservabilityDriver -from griptape.rules import Rule -from griptape.structures import Agent -from griptape.observability import Observability - -observability_driver = GriptapeCloudObservabilityDriver() - -with Observability(observability_driver=observability_driver): - agent = Agent(rules=[Rule("Output one word")]) - agent.run("Name an animal") +```python +--8<-- "docs/griptape-framework/drivers/src/observability_drivers_1.py" ``` @@ -52,21 +43,8 @@ The [OpenTelemetry](https://opentelemetry.io/) Observability Driver instruments Here is an example of how to use the `OpenTelemetryObservabilityDriver` with the `Observability` context manager to output the telemetry directly to the console: -```python title="PYTEST_IGNORE" -from griptape.drivers import OpenTelemetryObservabilityDriver -from griptape.rules import Rule -from griptape.structures import Agent -from griptape.observability import Observability -from opentelemetry.sdk.trace.export import ConsoleSpanExporter, BatchSpanProcessor - -observability_driver = OpenTelemetryObservabilityDriver( - service_name="name-an-animal", - span_processor=BatchSpanProcessor(ConsoleSpanExporter()) -) - -with Observability(observability_driver=observability_driver): - agent = Agent(rules=[Rule("Output one word")]) - agent.run("Name an animal") +```python +--8<-- "docs/griptape-framework/drivers/src/observability_drivers_2.py" ``` Output (only relevant because of use of `ConsoleSpanExporter`): diff --git a/docs/griptape-framework/drivers/prompt-drivers.md b/docs/griptape-framework/drivers/prompt-drivers.md index ab749bf7c..54230b999 100644 --- a/docs/griptape-framework/drivers/prompt-drivers.md +++ b/docs/griptape-framework/drivers/prompt-drivers.md @@ -10,51 +10,13 @@ Prompt Drivers are used by Griptape Structures to make API calls to the underlyi You can instantiate drivers and pass them to structures: ```python -from griptape.structures import Agent -from griptape.drivers import OpenAiChatPromptDriver -from griptape.rules import Rule -from griptape.config import StructureConfig - -agent = Agent( - config=StructureConfig( - prompt_driver=OpenAiChatPromptDriver(model="gpt-4o", 
temperature=0.3), - ), - input="You will be provided with a tweet, and your task is to classify its sentiment as positive, neutral, or negative. Tweet: {{ args[0] }}", - rules=[ - Rule( - value="Output only the sentiment." - ) - ], -) - -agent.run("I loved the new Batman movie!") +--8<-- "docs/griptape-framework/drivers/src/prompt_drivers_1.py" ``` Or use them independently: ```python -from griptape.common import PromptStack -from griptape.drivers import OpenAiChatPromptDriver - -stack = PromptStack() - -stack.add_system_message( - "You will be provided with Python code, and your task is to calculate its time complexity." -) -stack.add_user_message( - """ - def foo(n, k): - accum = 0 - for i in range(n): - for l in range(k): - accum += i - return accum - """ -) - -result = OpenAiChatPromptDriver(model="gpt-3.5-turbo-16k", temperature=0).run(stack) - -print(result.value) +--8<-- "docs/griptape-framework/drivers/src/prompt_drivers_2.py" ``` ## Prompt Drivers @@ -67,31 +29,7 @@ The [OpenAiChatPromptDriver](../../reference/griptape/drivers/prompt/openai_chat This driver uses [OpenAi function calling](https://platform.openai.com/docs/guides/function-calling) when using [Tools](../tools/index.md). ```python -import os -from griptape.structures import Agent -from griptape.drivers import OpenAiChatPromptDriver -from griptape.rules import Rule -from griptape.config import StructureConfig - -agent = Agent( - config=StructureConfig( - prompt_driver=OpenAiChatPromptDriver( - api_key=os.environ["OPENAI_API_KEY"], - temperature=0.1, - model="gpt-4o", - response_format="json_object", - seed=42, - ) - ), - input="You will be provided with a description of a mood, and your task is to generate the CSS code for a color that matches it. Description: {{ args[0] }}", - rules=[ - Rule( - value='Write your output in json with a single key called "css_code".' - ) - ], -) - -agent.run("Blue sky at dusk.") +--8<-- "docs/griptape-framework/drivers/src/prompt_drivers_3.py" ``` !!! info @@ -102,23 +40,8 @@ agent.run("Blue sky at dusk.") Many services such as [LMStudio](https://lmstudio.ai/) and [OhMyGPT](https://www.ohmygpt.com/) provide OpenAI-compatible APIs. You can use the [OpenAiChatPromptDriver](../../reference/griptape/drivers/prompt/openai_chat_prompt_driver.md) to interact with these services. Simply set the `base_url` to the service's API endpoint and the `model` to the model name. If the service requires an API key, you can set it in the `api_key` field. -```python title="PYTEST_IGNORE" -from griptape.structures import Agent -from griptape.drivers import OpenAiChatPromptDriver -from griptape.rules import Rule -from griptape.config import StructureConfig - -agent = Agent( - config=StructureConfig( - prompt_driver=OpenAiChatPromptDriver( - base_url="http://127.0.0.1:1234/v1", - model="lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF", stream=True - ) - ), - rules=[Rule(value="You are a helpful coding assistant.")], -) - -agent.run("How do I init and update a git submodule?") +```python +--8<-- "docs/griptape-framework/drivers/src/prompt_drivers_4.py" ``` !!! tip @@ -130,30 +53,7 @@ The [AzureOpenAiChatPromptDriver](../../reference/griptape/drivers/prompt/azure_ This driver uses [Azure OpenAi function calling](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/function-calling) when using [Tools](../tools/index.md). 
```python -import os -from griptape.structures import Agent -from griptape.rules import Rule -from griptape.drivers import AzureOpenAiChatPromptDriver -from griptape.config import StructureConfig - -agent = Agent( - config=StructureConfig( - prompt_driver=AzureOpenAiChatPromptDriver( - api_key=os.environ["AZURE_OPENAI_API_KEY_1"], - model="gpt-3.5-turbo", - azure_deployment=os.environ["AZURE_OPENAI_35_TURBO_DEPLOYMENT_ID"], - azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT_1"], - ) - ), - rules=[ - Rule( - value="You will be provided with text, and your task is to translate it into emojis. " - "Do not use any regular text. Do your best with emojis only." - ) - ], -) - -agent.run("Artificial intelligence is a technology with great promise.") +--8<-- "docs/griptape-framework/drivers/src/prompt_drivers_5.py" ``` ### Cohere @@ -165,21 +65,7 @@ This driver uses [Cohere tool use](https://docs.cohere.com/docs/tools) when usin This driver requires the `drivers-prompt-cohere` [extra](../index.md#extras). ```python -import os -from griptape.structures import Agent -from griptape.drivers import CoherePromptDriver -from griptape.config import StructureConfig - -agent = Agent( - config=StructureConfig( - prompt_driver=CoherePromptDriver( - model="command-r", - api_key=os.environ['COHERE_API_KEY'], - ) - ) -) - -agent.run('What is the sentiment of this review? Review: "I really enjoyed this movie!"') +--8<-- "docs/griptape-framework/drivers/src/prompt_drivers_6.py" ``` ### Anthropic @@ -191,21 +77,7 @@ The [AnthropicPromptDriver](../../reference/griptape/drivers/prompt/anthropic_pr This driver uses [Anthropic tool use](https://docs.anthropic.com/en/docs/build-with-claude/tool-use) when using [Tools](../tools/index.md). ```python -import os -from griptape.structures import Agent -from griptape.drivers import AnthropicPromptDriver -from griptape.config import StructureConfig - -agent = Agent( - config=StructureConfig( - prompt_driver=AnthropicPromptDriver( - model="claude-3-opus-20240229", - api_key=os.environ['ANTHROPIC_API_KEY'], - ) - ) -) - -agent.run('Where is the best place to see cherry blossums in Japan?') +--8<-- "docs/griptape-framework/drivers/src/prompt_drivers_7.py" ``` ### Google @@ -217,21 +89,7 @@ The [GooglePromptDriver](../../reference/griptape/drivers/prompt/google_prompt_d This driver uses [Gemini function calling](https://ai.google.dev/gemini-api/docs/function-calling) when using [Tools](../tools/index.md). ```python -import os -from griptape.structures import Agent -from griptape.drivers import GooglePromptDriver -from griptape.config import StructureConfig - -agent = Agent( - config=StructureConfig( - prompt_driver=GooglePromptDriver( - model="gemini-pro", - api_key=os.environ['GOOGLE_API_KEY'], - ) - ) -) - -agent.run('Briefly explain how a computer works to a young child.') +--8<-- "docs/griptape-framework/drivers/src/prompt_drivers_8.py" ``` ### Amazon Bedrock @@ -245,35 +103,7 @@ This driver uses [Bedrock tool use](https://docs.aws.amazon.com/bedrock/latest/u All models supported by the Converse API are available for use with this driver. ```python -from griptape.structures import Agent -from griptape.drivers import AmazonBedrockPromptDriver -from griptape.rules import Rule -from griptape.config import StructureConfig - -agent = Agent( - config=StructureConfig( - prompt_driver=AmazonBedrockPromptDriver( - model="anthropic.claude-3-sonnet-20240229-v1:0", - ) - ), - rules=[ - Rule( - value="You are a customer service agent that is classifying emails by type. 
I want you to give your answer and then explain it."
-        )
-    ],
-)
-agent.run(
-    """How would you categorize this email?
-    
-    Can I use my Mixmaster 4000 to mix paint, or is it only meant for mixing food?
-    
-
-    Categories are:
-    (A) Pre-sale question
-    (B) Broken or defective item
-    (C) Billing question
-    (D) Other (please explain)"""
-)
+--8<-- "docs/griptape-framework/drivers/src/prompt_drivers_9.py"
 ```
 
 ### Ollama
 
@@ -285,21 +115,7 @@ The [OllamaPromptDriver](../../reference/griptape/drivers/prompt/ollama_prompt_d
 
 This driver uses [Ollama tool calling](https://ollama.com/blog/tool-support) when using [Tools](../tools/index.md).
 
 ```python
-from griptape.config import StructureConfig
-from griptape.drivers import OllamaPromptDriver
-from griptape.tools import Calculator
-from griptape.structures import Agent
-
-
-agent = Agent(
-    config=StructureConfig(
-        prompt_driver=OllamaPromptDriver(
-            model="llama3.1",
-        ),
-    ),
-    tools=[Calculator()],
-)
-agent.run("What is (192 + 12) ^ 4")
+--8<-- "docs/griptape-framework/drivers/src/prompt_drivers_10.py"
 ```
 
 ### Hugging Face Hub
 
@@ -315,58 +131,15 @@ The [HuggingFaceHubPromptDriver](../../reference/griptape/drivers/prompt/hugging
 
 Due to the limitations of Hugging Face serverless inference, only models that are smaller than 10GB are supported.
 
 ```python
-import os
-from griptape.structures import Agent
-from griptape.drivers import HuggingFaceHubPromptDriver
-from griptape.rules import Rule, Ruleset
-from griptape.config import StructureConfig
-
-
-agent = Agent(
-    config=StructureConfig(
-        prompt_driver=HuggingFaceHubPromptDriver(
-            model="HuggingFaceH4/zephyr-7b-beta",
-            api_token=os.environ["HUGGINGFACE_HUB_ACCESS_TOKEN"],
-        )
-    ),
-    rulesets=[
-        Ruleset(
-            name="Girafatron",
-            rules=[
-                Rule(
-                    value="You are Girafatron, a giraffe-obsessed robot. You are talking to a human. "
-                    "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. "
-                    "Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe."
-                )
-            ],
-        )
-    ],
-)
-
-agent.run("Hello Girafatron, what is your favorite animal?")
+--8<-- "docs/griptape-framework/drivers/src/prompt_drivers_11.py"
 ```
 
 #### Text Generation Inference
 
 The [HuggingFaceHubPromptDriver](#hugging-face-hub) also supports [Text Generation Inference](https://huggingface.co/docs/text-generation-inference/basic_tutorials/consuming_tgi#inference-client) for running models locally. To use Text Generation Inference, just set `model` to a TGI endpoint.
 
-```python title="PYTEST_IGNORE"
-import os
-from griptape.structures import Agent
-from griptape.drivers import HuggingFaceHubPromptDriver
-from griptape.config import StructureConfig
-
-
-agent = Agent(
-    config=StructureConfig(
-        prompt_driver=HuggingFaceHubPromptDriver(
-            model="http://127.0.0.1:8080",
-            api_token=os.environ["HUGGINGFACE_HUB_ACCESS_TOKEN"],
-        ),
-    ),
-)
-
-agent.run("Write the code for a snake game.")
+```python
+--8<-- "docs/griptape-framework/drivers/src/prompt_drivers_12.py"
 ```
 
 ### Hugging Face Pipeline
 
@@ -380,31 +153,7 @@ The [HuggingFacePipelinePromptDriver](../../reference/griptape/drivers/prompt/hu
 
 Running a model locally can be a computationally expensive process.
```python
-from griptape.structures import Agent
-from griptape.drivers import HuggingFacePipelinePromptDriver
-from griptape.rules import Rule, Ruleset
-from griptape.config import StructureConfig
-
-
-agent = Agent(
-    config=StructureConfig(
-        prompt_driver=HuggingFacePipelinePromptDriver(
-            model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
-        )
-    ),
-    rulesets=[
-        Ruleset(
-            name="Pirate",
-            rules=[
-                Rule(
-                    value="You are a pirate chatbot who always responds in pirate speak!"
-                )
-            ],
-        )
-    ],
-)
-
-agent.run("How many helicopters can a human eat in one sitting?")
+--8<-- "docs/griptape-framework/drivers/src/prompt_drivers_13.py"
 ```
 
 ### Amazon SageMaker Jumpstart
 
@@ -417,25 +166,7 @@ The [AmazonSageMakerJumpstartPromptDriver](../../reference/griptape/drivers/prom
 
 Amazon SageMaker Jumpstart provides a wide range of models with varying capabilities. This Driver is intended primarily for _chat-optimized_ models that have a [Hugging Face Chat Template](https://huggingface.co/docs/transformers/en/chat_templating) available. If your model does not fit this use-case, we suggest sub-classing [AmazonSageMakerJumpstartPromptDriver](../../reference/griptape/drivers/prompt/amazon_sagemaker_jumpstart_prompt_driver.md) and overriding the `_to_model_input` and `_to_model_params` methods.
 
-
-
-```python title="PYTEST_IGNORE"
-import os
-from griptape.structures import Agent
-from griptape.drivers import (
-    AmazonSageMakerJumpstartPromptDriver,
-    SageMakerFalconPromptModelDriver,
-)
-from griptape.config import StructureConfig
-
-agent = Agent(
-    config=StructureConfig(
-        prompt_driver=AmazonSageMakerJumpstartPromptDriver(
-            endpoint=os.environ["SAGEMAKER_LLAMA_3_INSTRUCT_ENDPOINT_NAME"],
-            model="meta-llama/Meta-Llama-3-8B-Instruct",
-        )
-    )
-)
-
-agent.run("What is a good lasagna recipe?")
+
+```python
+--8<-- "docs/griptape-framework/drivers/src/prompt_drivers_14.py"
 ```
diff --git a/docs/griptape-framework/drivers/sql-drivers.md b/docs/griptape-framework/drivers/sql-drivers.md
index 814112a12..c5c15e258 100644
--- a/docs/griptape-framework/drivers/sql-drivers.md
+++ b/docs/griptape-framework/drivers/sql-drivers.md
@@ -23,13 +23,7 @@ For example, to use the `psycopg2` driver for PostgreSQL, you can install it wit
 This is a basic SQL driver based on [SQLAlchemy 2.0](https://docs.sqlalchemy.org/en/20/). Here is an example of how to use it:
 
 ```python
-from griptape.drivers import SqlDriver
-
-driver = SqlDriver(
-    engine_url="sqlite:///:memory:"
-)
-
-driver.execute_query("select 'foo', 'bar';")
+--8<-- "docs/griptape-framework/drivers/src/sql_drivers_1.py"
 ```
 
 ### Amazon Redshift
 
@@ -41,19 +35,7 @@ This is a SQL driver for interacting with the [Amazon Redshift Data API](https:/
 to execute statements. Here is an example of how to use it for Redshift Serverless:
 
 ```python
-import boto3
-import os
-from griptape.drivers import AmazonRedshiftSqlDriver
-
-session = boto3.Session()
-
-driver = AmazonRedshiftSqlDriver(
-    database=os.environ["REDSHIFT_DATABASE"],
-    session=session,
-    cluster_identifier=os.environ['REDSHIFT_CLUSTER_IDENTIFIER'],
-)
-
-driver.execute_query("select * from people;")
+--8<-- "docs/griptape-framework/drivers/src/sql_drivers_2.py"
 ```
 
 ### Snowflake
 
@@ -64,22 +46,5 @@ driver.execute_query("select * from people;")
 
 This is a SQL driver based on the [Snowflake SQLAlchemy Toolkit](https://docs.snowflake.com/en/developer-guide/python-connector/sqlalchemy) which runs on top of the Snowflake Connector for Python.
Here is an example of how to use it: ```python -import os -import snowflake.connector -from snowflake.connector import SnowflakeConnection -from griptape.drivers import SnowflakeSqlDriver - -def get_snowflake_connection() -> SnowflakeConnection: - return snowflake.connector.connect( - account=os.environ['SNOWFLAKE_ACCOUNT'], - user=os.environ['SNOWFLAKE_USER'], - password=os.environ['SNOWFLAKE_PASSWORD'], - database=os.environ['SNOWFLAKE_DATABASE'], - schema=os.environ['SNOWFLAKE_SCHEMA'], - warehouse=os.environ['SNOWFLAKE_WAREHOUSE'] - ) - -driver = SnowflakeSqlDriver(connection_func=get_snowflake_connection) - -driver.execute_query("select * from people;") +--8<-- "docs/griptape-framework/drivers/src/sql_drivers_3.py" ``` diff --git a/docs/griptape-framework/drivers/src/audio_transcription_drivers_1.py b/docs/griptape-framework/drivers/src/audio_transcription_drivers_1.py new file mode 100644 index 000000000..16013638e --- /dev/null +++ b/docs/griptape-framework/drivers/src/audio_transcription_drivers_1.py @@ -0,0 +1,15 @@ +from griptape.drivers import OpenAiAudioTranscriptionDriver +from griptape.engines import AudioTranscriptionEngine +from griptape.structures import Agent +from griptape.tools.audio_transcription.tool import AudioTranscriptionTool + +driver = OpenAiAudioTranscriptionDriver(model="whisper-1") + +tool = AudioTranscriptionTool( + off_prompt=False, + engine=AudioTranscriptionEngine( + audio_transcription_driver=driver, + ), +) + +Agent(tools=[tool]).run("Transcribe the following audio file: tests/resources/sentences.wav") diff --git a/docs/griptape-framework/drivers/src/conversation_memory_drivers_1.py b/docs/griptape-framework/drivers/src/conversation_memory_drivers_1.py new file mode 100644 index 000000000..27829d8d2 --- /dev/null +++ b/docs/griptape-framework/drivers/src/conversation_memory_drivers_1.py @@ -0,0 +1,9 @@ +from griptape.drivers import LocalConversationMemoryDriver +from griptape.memory.structure import ConversationMemory +from griptape.structures import Agent + +local_driver = LocalConversationMemoryDriver(file_path="memory.json") +agent = Agent(conversation_memory=ConversationMemory(driver=local_driver)) + +agent.run("Surfing is my favorite sport.") +agent.run("What is my favorite sport?") diff --git a/docs/griptape-framework/drivers/src/conversation_memory_drivers_2.py b/docs/griptape-framework/drivers/src/conversation_memory_drivers_2.py new file mode 100644 index 000000000..9db525b42 --- /dev/null +++ b/docs/griptape-framework/drivers/src/conversation_memory_drivers_2.py @@ -0,0 +1,19 @@ +import os +import uuid + +from griptape.drivers import AmazonDynamoDbConversationMemoryDriver +from griptape.memory.structure import ConversationMemory +from griptape.structures import Agent + +conversation_id = uuid.uuid4().hex +dynamodb_driver = AmazonDynamoDbConversationMemoryDriver( + table_name=os.environ["DYNAMODB_TABLE_NAME"], + partition_key="id", + value_attribute_key="memory", + partition_key_value=conversation_id, +) + +agent = Agent(conversation_memory=ConversationMemory(driver=dynamodb_driver)) + +agent.run("My name is Jeff.") +agent.run("What is my name?") diff --git a/docs/griptape-framework/drivers/src/conversation_memory_drivers_3.py b/docs/griptape-framework/drivers/src/conversation_memory_drivers_3.py new file mode 100644 index 000000000..0f80d1393 --- /dev/null +++ b/docs/griptape-framework/drivers/src/conversation_memory_drivers_3.py @@ -0,0 +1,20 @@ +import os +import uuid + +from griptape.drivers import RedisConversationMemoryDriver +from 
griptape.memory.structure import ConversationMemory
+from griptape.structures import Agent
+
+conversation_id = uuid.uuid4().hex
+redis_conversation_driver = RedisConversationMemoryDriver(
+    host=os.environ["REDIS_HOST"],
+    port=int(os.environ["REDIS_PORT"]),
+    password=os.environ["REDIS_PASSWORD"],
+    index=os.environ["REDIS_INDEX"],
+    conversation_id=conversation_id,
+)
+
+agent = Agent(conversation_memory=ConversationMemory(driver=redis_conversation_driver))
+
+agent.run("My name is Jeff.")
+agent.run("What is my name?")
diff --git a/docs/griptape-framework/drivers/src/conversation_memory_drivers_griptape_cloud.py b/docs/griptape-framework/drivers/src/conversation_memory_drivers_griptape_cloud.py
new file mode 100644
index 000000000..35492e06b
--- /dev/null
+++ b/docs/griptape-framework/drivers/src/conversation_memory_drivers_griptape_cloud.py
@@ -0,0 +1,15 @@
+import os
+import uuid
+
+from griptape.drivers import GriptapeCloudConversationMemoryDriver
+from griptape.memory.structure import ConversationMemory
+from griptape.structures import Agent
+
+conversation_id = uuid.uuid4().hex
+cloud_conversation_driver = GriptapeCloudConversationMemoryDriver(
+    api_key=os.environ["GT_CLOUD_API_KEY"],
+)
+agent = Agent(conversation_memory=ConversationMemory(driver=cloud_conversation_driver))
+
+agent.run("My name is Jeff.")
+agent.run("What is my name?")
diff --git a/docs/griptape-framework/drivers/src/embedding_drivers_1.py b/docs/griptape-framework/drivers/src/embedding_drivers_1.py
new file mode 100644
index 000000000..e0663f490
--- /dev/null
+++ b/docs/griptape-framework/drivers/src/embedding_drivers_1.py
@@ -0,0 +1,6 @@
+from griptape.drivers import OpenAiEmbeddingDriver
+
+embeddings = OpenAiEmbeddingDriver().embed_string("Hello Griptape!")
+
+# display the first 3 embeddings
+print(embeddings[:3])
diff --git a/docs/griptape-framework/drivers/src/embedding_drivers_10.py b/docs/griptape-framework/drivers/src/embedding_drivers_10.py
new file mode 100644
index 000000000..605b6e67a
--- /dev/null
+++ b/docs/griptape-framework/drivers/src/embedding_drivers_10.py
@@ -0,0 +1,19 @@
+from griptape.configs import Defaults
+from griptape.configs.drivers import DriversConfig
+from griptape.drivers import (
+    OpenAiChatPromptDriver,
+    VoyageAiEmbeddingDriver,
+)
+from griptape.structures import Agent
+from griptape.tools import PromptSummaryTool, WebScraperTool
+
+Defaults.drivers_config = DriversConfig(
+    prompt_driver=OpenAiChatPromptDriver(model="gpt-4o"),
+    embedding_driver=VoyageAiEmbeddingDriver(),
+)
+
+agent = Agent(
+    tools=[WebScraperTool(off_prompt=True), PromptSummaryTool(off_prompt=False)],
+)
+
+agent.run("based on https://www.griptape.ai/, tell me what Griptape is")
diff --git a/docs/griptape-framework/drivers/src/embedding_drivers_2.py b/docs/griptape-framework/drivers/src/embedding_drivers_2.py
new file mode 100644
index 000000000..e9f58a7f2
--- /dev/null
+++ b/docs/griptape-framework/drivers/src/embedding_drivers_2.py
@@ -0,0 +1,11 @@
+from griptape.drivers import OpenAiEmbeddingDriver
+
+embedding_driver = OpenAiEmbeddingDriver(
+    base_url="http://127.0.0.1:1234/v1",
+    model="nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.Q2_K",
+)
+
+embeddings = embedding_driver.embed_string("Hello world!")
+
+# display the first 3 embeddings
+print(embeddings[:3])
diff --git a/docs/griptape-framework/drivers/src/embedding_drivers_3.py
b/docs/griptape-framework/drivers/src/embedding_drivers_3.py new file mode 100644 index 000000000..0bf54311c --- /dev/null +++ b/docs/griptape-framework/drivers/src/embedding_drivers_3.py @@ -0,0 +1,6 @@ +from griptape.drivers import AmazonBedrockTitanEmbeddingDriver + +embeddings = AmazonBedrockTitanEmbeddingDriver().embed_string("Hello world!") + +# display the first 3 embeddings +print(embeddings[:3]) diff --git a/docs/griptape-framework/drivers/src/embedding_drivers_4.py b/docs/griptape-framework/drivers/src/embedding_drivers_4.py new file mode 100644 index 000000000..35e1f4303 --- /dev/null +++ b/docs/griptape-framework/drivers/src/embedding_drivers_4.py @@ -0,0 +1,6 @@ +from griptape.drivers import GoogleEmbeddingDriver + +embeddings = GoogleEmbeddingDriver().embed_string("Hello world!") + +# display the first 3 embeddings +print(embeddings[:3]) diff --git a/docs/griptape-framework/drivers/src/embedding_drivers_5.py b/docs/griptape-framework/drivers/src/embedding_drivers_5.py new file mode 100644 index 000000000..e9d403f59 --- /dev/null +++ b/docs/griptape-framework/drivers/src/embedding_drivers_5.py @@ -0,0 +1,18 @@ +import os + +from griptape.drivers import HuggingFaceHubEmbeddingDriver +from griptape.tokenizers import HuggingFaceTokenizer + +driver = HuggingFaceHubEmbeddingDriver( + api_token=os.environ["HUGGINGFACE_HUB_ACCESS_TOKEN"], + model="sentence-transformers/all-MiniLM-L6-v2", + tokenizer=HuggingFaceTokenizer( + model="sentence-transformers/all-MiniLM-L6-v2", + max_output_tokens=512, + ), +) + +embeddings = driver.embed_string("Hello world!") + +# display the first 3 embeddings +print(embeddings[:3]) diff --git a/docs/griptape-framework/drivers/src/embedding_drivers_6.py b/docs/griptape-framework/drivers/src/embedding_drivers_6.py new file mode 100644 index 000000000..93ec06bb3 --- /dev/null +++ b/docs/griptape-framework/drivers/src/embedding_drivers_6.py @@ -0,0 +1,10 @@ +from griptape.drivers import OllamaEmbeddingDriver + +driver = OllamaEmbeddingDriver( + model="all-minilm", +) + +results = driver.embed_string("Hello world!") + +# display the first 3 embeddings +print(results[:3]) diff --git a/docs/griptape-framework/drivers/src/embedding_drivers_7.py b/docs/griptape-framework/drivers/src/embedding_drivers_7.py new file mode 100644 index 000000000..9c5b2adbd --- /dev/null +++ b/docs/griptape-framework/drivers/src/embedding_drivers_7.py @@ -0,0 +1,13 @@ +import os + +from griptape.drivers import AmazonSageMakerJumpstartEmbeddingDriver + +driver = AmazonSageMakerJumpstartEmbeddingDriver( + endpoint=os.environ["SAGEMAKER_ENDPOINT"], + model=os.environ["SAGEMAKER_TENSORFLOW_HUB_MODEL"], +) + +embeddings = driver.embed_string("Hello world!") + +# display the first 3 embeddings +print(embeddings[:3]) diff --git a/docs/griptape-framework/drivers/src/embedding_drivers_8.py b/docs/griptape-framework/drivers/src/embedding_drivers_8.py new file mode 100644 index 000000000..312aa60bf --- /dev/null +++ b/docs/griptape-framework/drivers/src/embedding_drivers_8.py @@ -0,0 +1,10 @@ +import os + +from griptape.drivers import VoyageAiEmbeddingDriver + +driver = VoyageAiEmbeddingDriver(api_key=os.environ["VOYAGE_API_KEY"]) + +embeddings = driver.embed_string("Hello world!") + +# display the first 3 embeddings +print(embeddings[:3]) diff --git a/docs/griptape-framework/drivers/src/embedding_drivers_9.py b/docs/griptape-framework/drivers/src/embedding_drivers_9.py new file mode 100644 index 000000000..1b7677577 --- /dev/null +++ b/docs/griptape-framework/drivers/src/embedding_drivers_9.py 
@@ -0,0 +1,14 @@ +import os + +from griptape.drivers import CohereEmbeddingDriver + +embedding_driver = CohereEmbeddingDriver( + model="embed-english-v3.0", + api_key=os.environ["COHERE_API_KEY"], + input_type="search_document", +) + +embeddings = embedding_driver.embed_string("Hello world!") + +# display the first 3 embeddings +print(embeddings[:3]) diff --git a/docs/griptape-framework/drivers/src/event_listener_drivers_1.py b/docs/griptape-framework/drivers/src/event_listener_drivers_1.py new file mode 100644 index 000000000..66b9372c3 --- /dev/null +++ b/docs/griptape-framework/drivers/src/event_listener_drivers_1.py @@ -0,0 +1,32 @@ +import os + +from griptape.drivers import AmazonSqsEventListenerDriver +from griptape.events import EventBus, EventListener +from griptape.rules import Rule +from griptape.structures import Agent + +EventBus.add_event_listeners( + [ + EventListener( + driver=AmazonSqsEventListenerDriver( + queue_url=os.environ["AMAZON_SQS_QUEUE_URL"], + ), + ), + ] +) + + +agent = Agent( + rules=[ + Rule(value="You will be provided with a block of text, and your task is to extract a list of keywords from it.") + ], +) + +agent.run( + """Black-on-black ware is a 20th- and 21st-century pottery tradition developed by the Puebloan Native American ceramic artists in Northern New Mexico. + Traditional reduction-fired blackware has been made for centuries by pueblo artists. + Black-on-black ware of the past century is produced with a smooth surface, with the designs applied through selective burnishing or the application of refractory slip. + Another style involves carving or incising designs and selectively polishing the raised areas. + For generations several families from Kha'po Owingeh and P'ohwhóge Owingeh pueblos have been making black-on-black ware with the techniques passed down from matriarch potters. Artists from other pueblos have also produced black-on-black ware. + Several contemporary artists have created works honoring the pottery of their ancestors.""" +) diff --git a/docs/griptape-framework/drivers/src/event_listener_drivers_2.py b/docs/griptape-framework/drivers/src/event_listener_drivers_2.py new file mode 100644 index 000000000..8fd2e48c9 --- /dev/null +++ b/docs/griptape-framework/drivers/src/event_listener_drivers_2.py @@ -0,0 +1,14 @@ +from griptape.artifacts import TextArtifact +from griptape.drivers import GriptapeCloudEventListenerDriver +from griptape.events import FinishStructureRunEvent + +# By default, GriptapeCloudEventListenerDriver uses the api key provided +# in the GT_CLOUD_API_KEY environment variable. 
+event_driver = GriptapeCloudEventListenerDriver() + +done_event = FinishStructureRunEvent( + output_task_input=TextArtifact("Just started!"), + output_task_output=TextArtifact("All done!"), +) + +event_driver.publish_event(done_event) diff --git a/docs/griptape-framework/drivers/src/event_listener_drivers_3.py b/docs/griptape-framework/drivers/src/event_listener_drivers_3.py new file mode 100644 index 000000000..0bb248362 --- /dev/null +++ b/docs/griptape-framework/drivers/src/event_listener_drivers_3.py @@ -0,0 +1,31 @@ +import os + +from griptape.drivers import AmazonSqsEventListenerDriver +from griptape.events import EventBus, EventListener +from griptape.rules import Rule +from griptape.structures import Agent + +EventBus.add_event_listeners( + [ + EventListener( + driver=AmazonSqsEventListenerDriver( + queue_url=os.environ["AMAZON_SQS_QUEUE_URL"], + ), + ), + ] +) + +agent = Agent( + rules=[ + Rule(value="You will be provided with a block of text, and your task is to extract a list of keywords from it.") + ], +) + +agent.run( + """Black-on-black ware is a 20th- and 21st-century pottery tradition developed by the Puebloan Native American ceramic artists in Northern New Mexico. + Traditional reduction-fired blackware has been made for centuries by pueblo artists. + Black-on-black ware of the past century is produced with a smooth surface, with the designs applied through selective burnishing or the application of refractory slip. + Another style involves carving or incising designs and selectively polishing the raised areas. + For generations several families from Kha'po Owingeh and P'ohwhóge Owingeh pueblos have been making black-on-black ware with the techniques passed down from matriarch potters. Artists from other pueblos have also produced black-on-black ware. 
+ Several contemporary artists have created works honoring the pottery of their ancestors.""" +) diff --git a/docs/griptape-framework/drivers/src/event_listener_drivers_4.py b/docs/griptape-framework/drivers/src/event_listener_drivers_4.py new file mode 100644 index 000000000..6d03d2ce3 --- /dev/null +++ b/docs/griptape-framework/drivers/src/event_listener_drivers_4.py @@ -0,0 +1,27 @@ +import os + +from griptape.configs import Defaults +from griptape.configs.drivers import DriversConfig +from griptape.drivers import AwsIotCoreEventListenerDriver, OpenAiChatPromptDriver +from griptape.events import EventBus, EventListener, FinishStructureRunEvent +from griptape.rules import Rule +from griptape.structures import Agent + +Defaults.drivers_config = DriversConfig(prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo", temperature=0.7)) +EventBus.add_event_listeners( + [ + EventListener( + event_types=[FinishStructureRunEvent], + driver=AwsIotCoreEventListenerDriver( + topic=os.environ["AWS_IOT_CORE_TOPIC"], + iot_endpoint=os.environ["AWS_IOT_CORE_ENDPOINT"], + ), + ), + ] +) + +agent = Agent( + rules=[Rule(value="You will be provided with a text, and your task is to extract the airport codes from it.")], +) + +agent.run("I want to fly from Orlando to Boston") diff --git a/docs/griptape-framework/drivers/src/event_listener_drivers_5.py b/docs/griptape-framework/drivers/src/event_listener_drivers_5.py new file mode 100644 index 000000000..27186e229 --- /dev/null +++ b/docs/griptape-framework/drivers/src/event_listener_drivers_5.py @@ -0,0 +1,17 @@ +from griptape.drivers import GriptapeCloudEventListenerDriver +from griptape.events import EventBus, EventListener, FinishStructureRunEvent +from griptape.structures import Agent + +EventBus.add_event_listeners( + [ + EventListener( + event_types=[FinishStructureRunEvent], + # By default, GriptapeCloudEventListenerDriver uses the api key provided + # in the GT_CLOUD_API_KEY environment variable. + driver=GriptapeCloudEventListenerDriver(), + ), + ] +) + +agent = Agent() +agent.run("Create a list of 8 questions for an interview with a science fiction author.") diff --git a/docs/griptape-framework/drivers/src/event_listener_drivers_6.py b/docs/griptape-framework/drivers/src/event_listener_drivers_6.py new file mode 100644 index 000000000..c60cc6984 --- /dev/null +++ b/docs/griptape-framework/drivers/src/event_listener_drivers_6.py @@ -0,0 +1,20 @@ +import os + +from griptape.drivers import WebhookEventListenerDriver +from griptape.events import EventBus, EventListener, FinishStructureRunEvent +from griptape.structures import Agent + +EventBus.add_event_listeners( + [ + EventListener( + event_types=[FinishStructureRunEvent], + driver=WebhookEventListenerDriver( + webhook_url=os.environ["WEBHOOK_URL"], + ), + ), + ] +) + +agent = Agent() + +agent.run("Analyze the pros and cons of remote work vs. 
office work") diff --git a/docs/griptape-framework/drivers/src/event_listener_drivers_7.py b/docs/griptape-framework/drivers/src/event_listener_drivers_7.py new file mode 100644 index 000000000..c010cb8f9 --- /dev/null +++ b/docs/griptape-framework/drivers/src/event_listener_drivers_7.py @@ -0,0 +1,26 @@ +import os + +from griptape.drivers import PusherEventListenerDriver +from griptape.events import EventBus, EventListener, FinishStructureRunEvent +from griptape.structures import Agent + +EventBus.add_event_listeners( + [ + EventListener( + event_types=[FinishStructureRunEvent], + driver=PusherEventListenerDriver( + batched=False, + app_id=os.environ["PUSHER_APP_ID"], + key=os.environ["PUSHER_KEY"], + secret=os.environ["PUSHER_SECRET"], + cluster=os.environ["PUSHER_CLUSTER"], + channel="my-channel", + event_name="my-event", + ), + ), + ], +) + +agent = Agent() + +agent.run("Analyze the pros and cons of remote work vs. office work") diff --git a/docs/griptape-framework/drivers/src/image_generation_drivers_1.py b/docs/griptape-framework/drivers/src/image_generation_drivers_1.py new file mode 100644 index 000000000..b20a42265 --- /dev/null +++ b/docs/griptape-framework/drivers/src/image_generation_drivers_1.py @@ -0,0 +1,18 @@ +from griptape.drivers import OpenAiImageGenerationDriver +from griptape.engines import PromptImageGenerationEngine +from griptape.structures import Agent +from griptape.tools import PromptImageGenerationTool + +driver = OpenAiImageGenerationDriver( + model="dall-e-2", +) + +engine = PromptImageGenerationEngine(image_generation_driver=driver) + +agent = Agent( + tools=[ + PromptImageGenerationTool(engine=engine), + ] +) + +agent.run("Generate a watercolor painting of a dog riding a skateboard") diff --git a/docs/griptape-framework/drivers/src/image_generation_drivers_2.py b/docs/griptape-framework/drivers/src/image_generation_drivers_2.py new file mode 100644 index 000000000..ab07fcb27 --- /dev/null +++ b/docs/griptape-framework/drivers/src/image_generation_drivers_2.py @@ -0,0 +1,23 @@ +from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver +from griptape.engines import PromptImageGenerationEngine +from griptape.structures import Agent +from griptape.tools import PromptImageGenerationTool + +model_driver = BedrockStableDiffusionImageGenerationModelDriver( + style_preset="pixel-art", +) + +driver = AmazonBedrockImageGenerationDriver( + image_generation_model_driver=model_driver, + model="stability.stable-diffusion-xl-v0", +) + +engine = PromptImageGenerationEngine(image_generation_driver=driver) + +agent = Agent( + tools=[ + PromptImageGenerationTool(engine=engine), + ] +) + +agent.run("Generate an image of a dog riding a skateboard") diff --git a/docs/griptape-framework/drivers/src/image_generation_drivers_3.py b/docs/griptape-framework/drivers/src/image_generation_drivers_3.py new file mode 100644 index 000000000..b8c63589d --- /dev/null +++ b/docs/griptape-framework/drivers/src/image_generation_drivers_3.py @@ -0,0 +1,21 @@ +from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockTitanImageGenerationModelDriver +from griptape.engines import PromptImageGenerationEngine +from griptape.structures import Agent +from griptape.tools import PromptImageGenerationTool + +model_driver = BedrockTitanImageGenerationModelDriver() + +driver = AmazonBedrockImageGenerationDriver( + image_generation_model_driver=model_driver, + model="amazon.titan-image-generator-v1", +) + +engine = 
PromptImageGenerationEngine(image_generation_driver=driver) + +agent = Agent( + tools=[ + PromptImageGenerationTool(engine=engine), + ] +) + +agent.run("Generate a watercolor painting of a dog riding a skateboard") diff --git a/docs/griptape-framework/drivers/src/image_generation_drivers_4.py b/docs/griptape-framework/drivers/src/image_generation_drivers_4.py new file mode 100644 index 000000000..f1bc06200 --- /dev/null +++ b/docs/griptape-framework/drivers/src/image_generation_drivers_4.py @@ -0,0 +1,23 @@ +import os + +from griptape.drivers import AzureOpenAiImageGenerationDriver +from griptape.engines import PromptImageGenerationEngine +from griptape.structures import Agent +from griptape.tools import PromptImageGenerationTool + +driver = AzureOpenAiImageGenerationDriver( + model="dall-e-3", + azure_deployment=os.environ["AZURE_OPENAI_DALL_E_3_DEPLOYMENT_ID"], + azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT_2"], + api_key=os.environ["AZURE_OPENAI_API_KEY_2"], +) + +engine = PromptImageGenerationEngine(image_generation_driver=driver) + +agent = Agent( + tools=[ + PromptImageGenerationTool(engine=engine), + ] +) + +agent.run("Generate a watercolor painting of a dog riding a skateboard") diff --git a/docs/griptape-framework/drivers/src/image_generation_drivers_5.py b/docs/griptape-framework/drivers/src/image_generation_drivers_5.py new file mode 100644 index 000000000..46173a232 --- /dev/null +++ b/docs/griptape-framework/drivers/src/image_generation_drivers_5.py @@ -0,0 +1,23 @@ +import os + +from griptape.drivers import LeonardoImageGenerationDriver +from griptape.engines import PromptImageGenerationEngine +from griptape.structures import Agent +from griptape.tools import PromptImageGenerationTool + +driver = LeonardoImageGenerationDriver( + model=os.environ["LEONARDO_MODEL_ID"], + api_key=os.environ["LEONARDO_API_KEY"], + image_width=512, + image_height=1024, +) + +engine = PromptImageGenerationEngine(image_generation_driver=driver) + +agent = Agent( + tools=[ + PromptImageGenerationTool(engine=engine), + ] +) + +agent.run("Generate a watercolor painting of a dog riding a skateboard") diff --git a/docs/griptape-framework/drivers/src/image_generation_drivers_6.py b/docs/griptape-framework/drivers/src/image_generation_drivers_6.py new file mode 100644 index 000000000..d295da4ff --- /dev/null +++ b/docs/griptape-framework/drivers/src/image_generation_drivers_6.py @@ -0,0 +1,19 @@ +from griptape.drivers import OpenAiImageGenerationDriver +from griptape.engines import PromptImageGenerationEngine +from griptape.structures import Agent +from griptape.tools import PromptImageGenerationTool + +driver = OpenAiImageGenerationDriver( + model="dall-e-2", + image_size="512x512", +) + +engine = PromptImageGenerationEngine(image_generation_driver=driver) + +agent = Agent( + tools=[ + PromptImageGenerationTool(engine=engine), + ] +) + +agent.run("Generate a watercolor painting of a dog riding a skateboard") diff --git a/docs/griptape-framework/drivers/src/image_generation_drivers_7.py b/docs/griptape-framework/drivers/src/image_generation_drivers_7.py new file mode 100644 index 000000000..041f2360d --- /dev/null +++ b/docs/griptape-framework/drivers/src/image_generation_drivers_7.py @@ -0,0 +1,21 @@ +from griptape.artifacts import TextArtifact +from griptape.drivers import HuggingFacePipelineImageGenerationDriver, StableDiffusion3ImageGenerationPipelineDriver +from griptape.engines import PromptImageGenerationEngine +from griptape.structures import Pipeline +from griptape.tasks import 
PromptImageGenerationTask + +image_generation_task = PromptImageGenerationTask( + input=TextArtifact("landscape photograph, verdant, countryside, 8k"), + image_generation_engine=PromptImageGenerationEngine( + image_generation_driver=HuggingFacePipelineImageGenerationDriver( + model="stabilityai/stable-diffusion-3-medium-diffusers", + device="cuda", + pipeline_driver=StableDiffusion3ImageGenerationPipelineDriver( + height=512, + width=512, + ), + ) + ), +) + +output_artifact = Pipeline(tasks=[image_generation_task]).run().output diff --git a/docs/griptape-framework/drivers/src/image_generation_drivers_8.py b/docs/griptape-framework/drivers/src/image_generation_drivers_8.py new file mode 100644 index 000000000..69437a3a5 --- /dev/null +++ b/docs/griptape-framework/drivers/src/image_generation_drivers_8.py @@ -0,0 +1,30 @@ +from pathlib import Path + +from griptape.artifacts import TextArtifact +from griptape.drivers import ( + HuggingFacePipelineImageGenerationDriver, + StableDiffusion3Img2ImgImageGenerationPipelineDriver, +) +from griptape.engines import VariationImageGenerationEngine +from griptape.loaders import ImageLoader +from griptape.structures import Pipeline +from griptape.tasks import VariationImageGenerationTask + +prompt_artifact = TextArtifact("landscape photograph, verdant, countryside, 8k") +input_image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) + +image_variation_task = VariationImageGenerationTask( + input=(prompt_artifact, input_image_artifact), + image_generation_engine=VariationImageGenerationEngine( + image_generation_driver=HuggingFacePipelineImageGenerationDriver( + model="stabilityai/stable-diffusion-3-medium-diffusers", + device="cuda", + pipeline_driver=StableDiffusion3Img2ImgImageGenerationPipelineDriver( + height=1024, + width=1024, + ), + ) + ), +) + +output_artifact = Pipeline(tasks=[image_variation_task]).run().output diff --git a/docs/griptape-framework/drivers/src/image_generation_drivers_9.py b/docs/griptape-framework/drivers/src/image_generation_drivers_9.py new file mode 100644 index 000000000..2054588d9 --- /dev/null +++ b/docs/griptape-framework/drivers/src/image_generation_drivers_9.py @@ -0,0 +1,31 @@ +from pathlib import Path + +from griptape.artifacts import TextArtifact +from griptape.drivers import ( + HuggingFacePipelineImageGenerationDriver, + StableDiffusion3ControlNetImageGenerationPipelineDriver, +) +from griptape.engines import VariationImageGenerationEngine +from griptape.loaders import ImageLoader +from griptape.structures import Pipeline +from griptape.tasks import VariationImageGenerationTask + +prompt_artifact = TextArtifact("landscape photograph, verdant, countryside, 8k") +control_image_artifact = ImageLoader().load(Path("canny_control_image.png").read_bytes()) + +controlnet_task = VariationImageGenerationTask( + input=(prompt_artifact, control_image_artifact), + image_generation_engine=VariationImageGenerationEngine( + image_generation_driver=HuggingFacePipelineImageGenerationDriver( + model="stabilityai/stable-diffusion-3-medium-diffusers", + device="cuda", + pipeline_driver=StableDiffusion3ControlNetImageGenerationPipelineDriver( + controlnet_model="InstantX/SD3-Controlnet-Canny", + height=768, + width=1024, + ), + ) + ), +) + +output_artifact = Pipeline(tasks=[controlnet_task]).run().output diff --git a/docs/griptape-framework/drivers/src/image_query_drivers_1.py b/docs/griptape-framework/drivers/src/image_query_drivers_1.py new file mode 100644 index 000000000..0c9db5be7 --- /dev/null +++ 
b/docs/griptape-framework/drivers/src/image_query_drivers_1.py @@ -0,0 +1,18 @@ +from pathlib import Path + +from griptape.drivers import AnthropicImageQueryDriver +from griptape.engines import ImageQueryEngine +from griptape.loaders import ImageLoader + +driver = AnthropicImageQueryDriver( + model="claude-3-sonnet-20240229", + max_tokens=1024, +) + +engine = ImageQueryEngine( + image_query_driver=driver, +) + +image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) + +engine.run("Describe the weather in the image", [image_artifact]) diff --git a/docs/griptape-framework/drivers/src/image_query_drivers_2.py b/docs/griptape-framework/drivers/src/image_query_drivers_2.py new file mode 100644 index 000000000..8d605c0d9 --- /dev/null +++ b/docs/griptape-framework/drivers/src/image_query_drivers_2.py @@ -0,0 +1,22 @@ +from pathlib import Path + +from griptape.drivers import AnthropicImageQueryDriver +from griptape.engines import ImageQueryEngine +from griptape.loaders import ImageLoader + +driver = AnthropicImageQueryDriver( + model="claude-3-sonnet-20240229", + max_tokens=1024, +) + +engine = ImageQueryEngine( + image_query_driver=driver, +) + +image_artifact1 = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) + +image_artifact2 = ImageLoader().load(Path("tests/resources/cow.png").read_bytes()) + +result = engine.run("Describe the weather in the image", [image_artifact1, image_artifact2]) + +print(result) diff --git a/docs/griptape-framework/drivers/src/image_query_drivers_3.py b/docs/griptape-framework/drivers/src/image_query_drivers_3.py new file mode 100644 index 000000000..14070312b --- /dev/null +++ b/docs/griptape-framework/drivers/src/image_query_drivers_3.py @@ -0,0 +1,18 @@ +from pathlib import Path + +from griptape.drivers import OpenAiImageQueryDriver +from griptape.engines import ImageQueryEngine +from griptape.loaders import ImageLoader + +driver = OpenAiImageQueryDriver( + model="gpt-4o", + max_tokens=256, +) + +engine = ImageQueryEngine( + image_query_driver=driver, +) + +image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) + +engine.run("Describe the weather in the image", [image_artifact]) diff --git a/docs/griptape-framework/drivers/src/image_query_drivers_4.py b/docs/griptape-framework/drivers/src/image_query_drivers_4.py new file mode 100644 index 000000000..9ebf5ef59 --- /dev/null +++ b/docs/griptape-framework/drivers/src/image_query_drivers_4.py @@ -0,0 +1,22 @@ +import os +from pathlib import Path + +from griptape.drivers import AzureOpenAiImageQueryDriver +from griptape.engines import ImageQueryEngine +from griptape.loaders import ImageLoader + +driver = AzureOpenAiImageQueryDriver( + azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT_2"], + api_key=os.environ["AZURE_OPENAI_API_KEY_2"], + model="gpt-4o", + azure_deployment="gpt-4o", + max_tokens=256, +) + +engine = ImageQueryEngine( + image_query_driver=driver, +) + +image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) + +engine.run("Describe the weather in the image", [image_artifact]) diff --git a/docs/griptape-framework/drivers/src/image_query_drivers_5.py b/docs/griptape-framework/drivers/src/image_query_drivers_5.py new file mode 100644 index 000000000..2bab9a7fd --- /dev/null +++ b/docs/griptape-framework/drivers/src/image_query_drivers_5.py @@ -0,0 +1,24 @@ +from pathlib import Path + +import boto3 + +from griptape.drivers import AmazonBedrockImageQueryDriver, BedrockClaudeImageQueryModelDriver 
+from griptape.engines import ImageQueryEngine +from griptape.loaders import ImageLoader + +session = boto3.Session(region_name="us-west-2") + +driver = AmazonBedrockImageQueryDriver( + image_query_model_driver=BedrockClaudeImageQueryModelDriver(), + model="anthropic.claude-3-sonnet-20240229-v1:0", + session=session, +) + +engine = ImageQueryEngine(image_query_driver=driver) + +image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) + + +result = engine.run("Describe the weather in the image", [image_artifact]) + +print(result) diff --git a/docs/griptape-framework/drivers/src/observability_drivers_1.py b/docs/griptape-framework/drivers/src/observability_drivers_1.py new file mode 100644 index 000000000..ac8a43800 --- /dev/null +++ b/docs/griptape-framework/drivers/src/observability_drivers_1.py @@ -0,0 +1,10 @@ +from griptape.drivers import GriptapeCloudObservabilityDriver +from griptape.observability import Observability +from griptape.rules import Rule +from griptape.structures import Agent + +observability_driver = GriptapeCloudObservabilityDriver() + +with Observability(observability_driver=observability_driver): + agent = Agent(rules=[Rule("Output one word")]) + agent.run("Name an animal") diff --git a/docs/griptape-framework/drivers/src/observability_drivers_2.py b/docs/griptape-framework/drivers/src/observability_drivers_2.py new file mode 100644 index 000000000..f4c3d5663 --- /dev/null +++ b/docs/griptape-framework/drivers/src/observability_drivers_2.py @@ -0,0 +1,14 @@ +from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter + +from griptape.drivers import OpenTelemetryObservabilityDriver +from griptape.observability import Observability +from griptape.rules import Rule +from griptape.structures import Agent + +observability_driver = OpenTelemetryObservabilityDriver( + service_name="name-an-animal", span_processor=BatchSpanProcessor(ConsoleSpanExporter()) +) + +with Observability(observability_driver=observability_driver): + agent = Agent(rules=[Rule("Output one word")]) + agent.run("Name an animal") diff --git a/docs/griptape-framework/drivers/src/prompt_drivers_1.py b/docs/griptape-framework/drivers/src/prompt_drivers_1.py new file mode 100644 index 000000000..ab5273228 --- /dev/null +++ b/docs/griptape-framework/drivers/src/prompt_drivers_1.py @@ -0,0 +1,11 @@ +from griptape.drivers import OpenAiChatPromptDriver +from griptape.rules import Rule +from griptape.structures import Agent + +agent = Agent( + prompt_driver=OpenAiChatPromptDriver(model="gpt-4o", temperature=0.3), + input="You will be provided with a tweet, and your task is to classify its sentiment as positive, neutral, or negative. 
Tweet: {{ args[0] }}", + rules=[Rule(value="Output only the sentiment.")], +) + +agent.run("I loved the new Batman movie!") diff --git a/docs/griptape-framework/drivers/src/prompt_drivers_10.py b/docs/griptape-framework/drivers/src/prompt_drivers_10.py new file mode 100644 index 000000000..1d757668c --- /dev/null +++ b/docs/griptape-framework/drivers/src/prompt_drivers_10.py @@ -0,0 +1,11 @@ +from griptape.drivers import OllamaPromptDriver +from griptape.structures import Agent +from griptape.tools import CalculatorTool + +agent = Agent( + prompt_driver=OllamaPromptDriver( + model="llama3.1", + ), + tools=[CalculatorTool()], +) +agent.run("What is (192 + 12) ^ 4") diff --git a/docs/griptape-framework/drivers/src/prompt_drivers_11.py b/docs/griptape-framework/drivers/src/prompt_drivers_11.py new file mode 100644 index 000000000..9e838473c --- /dev/null +++ b/docs/griptape-framework/drivers/src/prompt_drivers_11.py @@ -0,0 +1,26 @@ +import os + +from griptape.drivers import HuggingFaceHubPromptDriver +from griptape.rules import Rule, Ruleset +from griptape.structures import Agent + +agent = Agent( + prompt_driver=HuggingFaceHubPromptDriver( + model="HuggingFaceH4/zephyr-7b-beta", + api_token=os.environ["HUGGINGFACE_HUB_ACCESS_TOKEN"], + ), + rulesets=[ + Ruleset( + name="Girafatron", + rules=[ + Rule( + value="You are Girafatron, a giraffe-obsessed robot. You are talking to a human. " + "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. " + "Girafatron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe." + ) + ], + ) + ], +) + +agent.run("Hello Girafatron, what is your favorite animal?") diff --git a/docs/griptape-framework/drivers/src/prompt_drivers_12.py b/docs/griptape-framework/drivers/src/prompt_drivers_12.py new file mode 100644 index 000000000..d555c32c9 --- /dev/null +++ b/docs/griptape-framework/drivers/src/prompt_drivers_12.py @@ -0,0 +1,13 @@ +import os + +from griptape.drivers import HuggingFaceHubPromptDriver +from griptape.structures import Agent + +agent = Agent( + prompt_driver=HuggingFaceHubPromptDriver( + model="http://127.0.0.1:8080", + api_token=os.environ["HUGGINGFACE_HUB_ACCESS_TOKEN"], + ), +) + +agent.run("Write the code for a snake game.") diff --git a/docs/griptape-framework/drivers/src/prompt_drivers_13.py b/docs/griptape-framework/drivers/src/prompt_drivers_13.py new file mode 100644 index 000000000..d3ddd9093 --- /dev/null +++ b/docs/griptape-framework/drivers/src/prompt_drivers_13.py @@ -0,0 +1,17 @@ +from griptape.drivers import HuggingFacePipelinePromptDriver +from griptape.rules import Rule, Ruleset +from griptape.structures import Agent + +agent = Agent( + prompt_driver=HuggingFacePipelinePromptDriver( + model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", + ), + rulesets=[ + Ruleset( + name="Pirate", + rules=[Rule(value="You are a pirate chatbot who always responds in pirate speak!")], + ) + ], +) + +agent.run("How many helicopters can a human eat in one sitting?") diff --git a/docs/griptape-framework/drivers/src/prompt_drivers_14.py b/docs/griptape-framework/drivers/src/prompt_drivers_14.py new file mode 100644 index 000000000..228a5f9b2 --- /dev/null +++ b/docs/griptape-framework/drivers/src/prompt_drivers_14.py @@ -0,0 +1,15 @@ +import os + +from griptape.drivers import ( + AmazonSageMakerJumpstartPromptDriver, +) +from griptape.structures import Agent + +agent = Agent( + prompt_driver=AmazonSageMakerJumpstartPromptDriver(
endpoint=os.environ["SAGEMAKER_LLAMA_3_INSTRUCT_ENDPOINT_NAME"], + model="meta-llama/Meta-Llama-3-8B-Instruct", + ) +) + +agent.run("What is a good lasagna recipe?") diff --git a/docs/griptape-framework/drivers/src/prompt_drivers_2.py b/docs/griptape-framework/drivers/src/prompt_drivers_2.py new file mode 100644 index 000000000..af516f877 --- /dev/null +++ b/docs/griptape-framework/drivers/src/prompt_drivers_2.py @@ -0,0 +1,20 @@ +from griptape.common import PromptStack +from griptape.drivers import OpenAiChatPromptDriver + +stack = PromptStack() + +stack.add_system_message("You will be provided with Python code, and your task is to calculate its time complexity.") +stack.add_user_message( + """ + def foo(n, k): + accum = 0 + for i in range(n): + for l in range(k): + accum += i + return accum + """ +) + +result = OpenAiChatPromptDriver(model="gpt-3.5-turbo-16k", temperature=0).run(stack) + +print(result.value) diff --git a/docs/griptape-framework/drivers/src/prompt_drivers_3.py b/docs/griptape-framework/drivers/src/prompt_drivers_3.py new file mode 100644 index 000000000..8e85ce887 --- /dev/null +++ b/docs/griptape-framework/drivers/src/prompt_drivers_3.py @@ -0,0 +1,19 @@ +import os + +from griptape.drivers import OpenAiChatPromptDriver +from griptape.rules import Rule +from griptape.structures import Agent + +agent = Agent( + prompt_driver=OpenAiChatPromptDriver( + api_key=os.environ["OPENAI_API_KEY"], + temperature=0.1, + model="gpt-4o", + response_format="json_object", + seed=42, + ), + input="You will be provided with a description of a mood, and your task is to generate the CSS code for a color that matches it. Description: {{ args[0] }}", + rules=[Rule(value='Write your output in json with a single key called "css_code".')], +) + +agent.run("Blue sky at dusk.") diff --git a/docs/griptape-framework/drivers/src/prompt_drivers_4.py b/docs/griptape-framework/drivers/src/prompt_drivers_4.py new file mode 100644 index 000000000..bcafb40de --- /dev/null +++ b/docs/griptape-framework/drivers/src/prompt_drivers_4.py @@ -0,0 +1,12 @@ +from griptape.drivers import OpenAiChatPromptDriver +from griptape.rules import Rule +from griptape.structures import Agent + +agent = Agent( + prompt_driver=OpenAiChatPromptDriver( + base_url="http://127.0.0.1:1234/v1", model="lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF", stream=True + ), + rules=[Rule(value="You are a helpful coding assistant.")], +) + +agent.run("How do I init and update a git submodule?") diff --git a/docs/griptape-framework/drivers/src/prompt_drivers_5.py b/docs/griptape-framework/drivers/src/prompt_drivers_5.py new file mode 100644 index 000000000..76301d8d9 --- /dev/null +++ b/docs/griptape-framework/drivers/src/prompt_drivers_5.py @@ -0,0 +1,22 @@ +import os + +from griptape.drivers import AzureOpenAiChatPromptDriver +from griptape.rules import Rule +from griptape.structures import Agent + +agent = Agent( + prompt_driver=AzureOpenAiChatPromptDriver( + api_key=os.environ["AZURE_OPENAI_API_KEY_1"], + model="gpt-3.5-turbo", + azure_deployment=os.environ["AZURE_OPENAI_35_TURBO_DEPLOYMENT_ID"], + azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT_1"], + ), + rules=[ + Rule( + value="You will be provided with text, and your task is to translate it into emojis. " + "Do not use any regular text. Do your best with emojis only." 
+ ) + ], +) + +agent.run("Artificial intelligence is a technology with great promise.") diff --git a/docs/griptape-framework/drivers/src/prompt_drivers_6.py b/docs/griptape-framework/drivers/src/prompt_drivers_6.py new file mode 100644 index 000000000..5e4d226a6 --- /dev/null +++ b/docs/griptape-framework/drivers/src/prompt_drivers_6.py @@ -0,0 +1,13 @@ +import os + +from griptape.drivers import CoherePromptDriver +from griptape.structures import Agent + +agent = Agent( + prompt_driver=CoherePromptDriver( + model="command-r", + api_key=os.environ["COHERE_API_KEY"], + ) +) + +agent.run('What is the sentiment of this review? Review: "I really enjoyed this movie!"') diff --git a/docs/griptape-framework/drivers/src/prompt_drivers_7.py b/docs/griptape-framework/drivers/src/prompt_drivers_7.py new file mode 100644 index 000000000..23f3d0c35 --- /dev/null +++ b/docs/griptape-framework/drivers/src/prompt_drivers_7.py @@ -0,0 +1,13 @@ +import os + +from griptape.drivers import AnthropicPromptDriver +from griptape.structures import Agent + +agent = Agent( + prompt_driver=AnthropicPromptDriver( + model="claude-3-opus-20240229", + api_key=os.environ["ANTHROPIC_API_KEY"], + ) +) + +agent.run("Where is the best place to see cherry blossoms in Japan?") diff --git a/docs/griptape-framework/drivers/src/prompt_drivers_8.py b/docs/griptape-framework/drivers/src/prompt_drivers_8.py new file mode 100644 index 000000000..b6a1c109e --- /dev/null +++ b/docs/griptape-framework/drivers/src/prompt_drivers_8.py @@ -0,0 +1,13 @@ +import os + +from griptape.drivers import GooglePromptDriver +from griptape.structures import Agent + +agent = Agent( + prompt_driver=GooglePromptDriver( + model="gemini-pro", + api_key=os.environ["GOOGLE_API_KEY"], + ) +) + +agent.run("Briefly explain how a computer works to a young child.") diff --git a/docs/griptape-framework/drivers/src/prompt_drivers_9.py b/docs/griptape-framework/drivers/src/prompt_drivers_9.py new file mode 100644 index 000000000..992dbecd2 --- /dev/null +++ b/docs/griptape-framework/drivers/src/prompt_drivers_9.py @@ -0,0 +1,26 @@ +from griptape.drivers import AmazonBedrockPromptDriver +from griptape.rules import Rule +from griptape.structures import Agent + +agent = Agent( + prompt_driver=AmazonBedrockPromptDriver( + model="anthropic.claude-3-sonnet-20240229-v1:0", + ), + rules=[ + Rule( + value="You are a customer service agent that is classifying emails by type. I want you to give your answer and then explain it." + ) + ], +) +agent.run( + """How would you categorize this email? + + Can I use my Mixmaster 4000 to mix paint, or is it only meant for mixing food?
+ + + Categories are: + (A) Pre-sale question + (B) Broken or defective item + (C) Billing question + (D) Other (please explain)""" +) diff --git a/docs/griptape-framework/drivers/src/sql_drivers_1.py b/docs/griptape-framework/drivers/src/sql_drivers_1.py new file mode 100644 index 000000000..7b03799a9 --- /dev/null +++ b/docs/griptape-framework/drivers/src/sql_drivers_1.py @@ -0,0 +1,5 @@ +from griptape.drivers import SqlDriver + +driver = SqlDriver(engine_url="sqlite:///:memory:") + +driver.execute_query("select 'foo', 'bar';") diff --git a/docs/griptape-framework/drivers/src/sql_drivers_2.py b/docs/griptape-framework/drivers/src/sql_drivers_2.py new file mode 100644 index 000000000..37087bc16 --- /dev/null +++ b/docs/griptape-framework/drivers/src/sql_drivers_2.py @@ -0,0 +1,15 @@ +import os + +import boto3 + +from griptape.drivers import AmazonRedshiftSqlDriver + +session = boto3.Session() + +driver = AmazonRedshiftSqlDriver( + database=os.environ["REDSHIFT_DATABASE"], + session=session, + cluster_identifier=os.environ["REDSHIFT_CLUSTER_IDENTIFIER"], +) + +driver.execute_query("select * from people;") diff --git a/docs/griptape-framework/drivers/src/sql_drivers_3.py b/docs/griptape-framework/drivers/src/sql_drivers_3.py new file mode 100644 index 000000000..29ee4a818 --- /dev/null +++ b/docs/griptape-framework/drivers/src/sql_drivers_3.py @@ -0,0 +1,22 @@ +import os + +import snowflake.connector +from snowflake.connector import SnowflakeConnection + +from griptape.drivers import SnowflakeSqlDriver + + +def get_snowflake_connection() -> SnowflakeConnection: + return snowflake.connector.connect( + account=os.environ["SNOWFLAKE_ACCOUNT"], + user=os.environ["SNOWFLAKE_USER"], + password=os.environ["SNOWFLAKE_PASSWORD"], + database=os.environ["SNOWFLAKE_DATABASE"], + schema=os.environ["SNOWFLAKE_SCHEMA"], + warehouse=os.environ["SNOWFLAKE_WAREHOUSE"], + ) + + +driver = SnowflakeSqlDriver(connection_func=get_snowflake_connection) + +driver.execute_query("select * from people;") diff --git a/docs/griptape-framework/drivers/src/structure_run_drivers_1.py b/docs/griptape-framework/drivers/src/structure_run_drivers_1.py new file mode 100644 index 000000000..a29bfbedf --- /dev/null +++ b/docs/griptape-framework/drivers/src/structure_run_drivers_1.py @@ -0,0 +1,43 @@ +from griptape.drivers import LocalStructureRunDriver +from griptape.rules import Rule +from griptape.structures import Agent, Pipeline +from griptape.tasks import StructureRunTask + + +def build_joke_teller() -> Agent: + return Agent( + rules=[ + Rule( + value="You are very funny.", + ) + ], + ) + + +def build_joke_rewriter() -> Agent: + return Agent( + rules=[ + Rule( + value="You are the editor of a joke book. 
But you only speak in riddles", + ) + ], + ) + + +joke_coordinator = Pipeline( + tasks=[ + StructureRunTask( + driver=LocalStructureRunDriver( + structure_factory_fn=build_joke_teller, + ), + ), + StructureRunTask( + ("Rewrite this joke: {{ parent_output }}",), + driver=LocalStructureRunDriver( + structure_factory_fn=build_joke_rewriter, + ), + ), + ] +) + +joke_coordinator.run("Tell me a joke") diff --git a/docs/griptape-framework/drivers/src/structure_run_drivers_2.py b/docs/griptape-framework/drivers/src/structure_run_drivers_2.py new file mode 100644 index 000000000..6103a6507 --- /dev/null +++ b/docs/griptape-framework/drivers/src/structure_run_drivers_2.py @@ -0,0 +1,41 @@ +import os + +from griptape.drivers import GriptapeCloudStructureRunDriver, LocalStructureRunDriver +from griptape.rules import Rule +from griptape.structures import Agent, Pipeline +from griptape.tasks import StructureRunTask + +base_url = os.environ["GRIPTAPE_CLOUD_BASE_URL"] +api_key = os.environ["GRIPTAPE_CLOUD_API_KEY"] +structure_id = os.environ["GRIPTAPE_CLOUD_STRUCTURE_ID"] + + +pipeline = Pipeline( + tasks=[ + StructureRunTask( + ("Think of a question related to Retrieval Augmented Generation.",), + driver=LocalStructureRunDriver( + structure_factory_fn=lambda: Agent( + rules=[ + Rule( + value="You are an expert in Retrieval Augmented Generation.", + ), + Rule( + value="Only output your answer, no other information.", + ), + ] + ) + ), + ), + StructureRunTask( + ("{{ parent_output }}",), + driver=GriptapeCloudStructureRunDriver( + base_url=base_url, + api_key=api_key, + structure_id=structure_id, + ), + ), + ] +) + +pipeline.run() diff --git a/docs/griptape-framework/drivers/src/text_to_speech_drivers_1.py b/docs/griptape-framework/drivers/src/text_to_speech_drivers_1.py new file mode 100644 index 000000000..376113d63 --- /dev/null +++ b/docs/griptape-framework/drivers/src/text_to_speech_drivers_1.py @@ -0,0 +1,20 @@ +import os + +from griptape.drivers import ElevenLabsTextToSpeechDriver +from griptape.engines import TextToSpeechEngine +from griptape.structures import Agent +from griptape.tools.text_to_speech.tool import TextToSpeechTool + +driver = ElevenLabsTextToSpeechDriver( + api_key=os.environ["ELEVEN_LABS_API_KEY"], + model="eleven_multilingual_v2", + voice="Matilda", +) + +tool = TextToSpeechTool( + engine=TextToSpeechEngine( + text_to_speech_driver=driver, + ), +) + +Agent(tools=[tool]).run("Generate audio from this text: 'Hello, world!'") diff --git a/docs/griptape-framework/drivers/src/text_to_speech_drivers_2.py b/docs/griptape-framework/drivers/src/text_to_speech_drivers_2.py new file mode 100644 index 000000000..4a6323b1b --- /dev/null +++ b/docs/griptape-framework/drivers/src/text_to_speech_drivers_2.py @@ -0,0 +1,14 @@ +from griptape.drivers import OpenAiTextToSpeechDriver +from griptape.engines import TextToSpeechEngine +from griptape.structures import Agent +from griptape.tools.text_to_speech.tool import TextToSpeechTool + +driver = OpenAiTextToSpeechDriver() + +tool = TextToSpeechTool( + engine=TextToSpeechEngine( + text_to_speech_driver=driver, + ), +) + +Agent(tools=[tool]).run("Generate audio from this text: 'Hello, world!'") diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_1.py b/docs/griptape-framework/drivers/src/vector_store_drivers_1.py new file mode 100644 index 000000000..a4e54da3a --- /dev/null +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_1.py @@ -0,0 +1,24 @@ +import os + +from griptape.artifacts import ErrorArtifact +from 
griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver +from griptape.loaders import WebLoader + +# Initialize an Embedding Driver +embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) + +vector_store_driver = LocalVectorStoreDriver(embedding_driver=embedding_driver) + +# Load Artifacts from the web +artifacts = WebLoader(max_tokens=100).load("https://www.griptape.ai") + +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) +# Upsert Artifacts into the Vector Store Driver +[vector_store_driver.upsert_text_artifact(a, namespace="griptape") for a in artifacts] + +results = vector_store_driver.query(query="What is griptape?") + +values = [r.to_artifact().value for r in results] + +print("\n\n".join(values)) diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_10.py b/docs/griptape-framework/drivers/src/vector_store_drivers_10.py new file mode 100644 index 000000000..b7645bd82 --- /dev/null +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_10.py @@ -0,0 +1,41 @@ +import os + +from griptape.artifacts.error_artifact import ErrorArtifact +from griptape.drivers import OpenAiEmbeddingDriver, QdrantVectorStoreDriver +from griptape.loaders import WebLoader + +# Set up environment variables +host = os.environ["QDRANT_CLUSTER_ENDPOINT"] +api_key = os.environ["QDRANT_CLUSTER_API_KEY"] + +# Initialize an Embedding Driver. +embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) + +vector_store_driver = QdrantVectorStoreDriver( + url=host, + collection_name="griptape", + content_payload_key="content", + embedding_driver=embedding_driver, + api_key=api_key, +) + +# Load Artifacts from the web +artifacts = WebLoader().load("https://www.griptape.ai") + +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) + +# Recreate Qdrant collection +vector_store_driver.client.recreate_collection( + collection_name=vector_store_driver.collection_name, + vectors_config={"size": 1536, "distance": vector_store_driver.distance}, +) + +# Upsert Artifacts into the Vector Store Driver +[vector_store_driver.upsert_text_artifact(a, namespace="griptape") for a in artifacts] + +results = vector_store_driver.query(query="What is griptape?") + +values = [r.to_artifact().value for r in results] + +print("\n\n".join(values)) diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_11.py b/docs/griptape-framework/drivers/src/vector_store_drivers_11.py new file mode 100644 index 000000000..965f97715 --- /dev/null +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_11.py @@ -0,0 +1,36 @@ +import os + +from griptape.artifacts.error_artifact import ErrorArtifact +from griptape.drivers import AstraDbVectorStoreDriver, OpenAiEmbeddingDriver +from griptape.loaders import WebLoader + +# Astra DB secrets and connection parameters +api_endpoint = os.environ["ASTRA_DB_API_ENDPOINT"] +token = os.environ["ASTRA_DB_APPLICATION_TOKEN"] +astra_db_namespace = os.environ.get("ASTRA_DB_KEYSPACE") # optional + +# Initialize an Embedding Driver. 
+embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) + +vector_store_driver = AstraDbVectorStoreDriver( + embedding_driver=embedding_driver, + api_endpoint=api_endpoint, + token=token, + collection_name="griptape_test_collection", + astra_db_namespace=astra_db_namespace, # optional +) + +# Load Artifacts from the web +artifacts = WebLoader().load("https://www.griptape.ai") + +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) + +# Upsert Artifacts into the Vector Store Driver +[vector_store_driver.upsert_text_artifact(a, namespace="griptape") for a in artifacts] + +results = vector_store_driver.query(query="What is griptape?") + +values = [r.to_artifact().value for r in results] + +print("\n\n".join(values)) diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_2.py b/docs/griptape-framework/drivers/src/vector_store_drivers_2.py new file mode 100644 index 000000000..f8b500924 --- /dev/null +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_2.py @@ -0,0 +1,17 @@ +import os + +from griptape.drivers import GriptapeCloudKnowledgeBaseVectorStoreDriver + +# Initialize environment variables +gt_cloud_api_key = os.environ["GRIPTAPE_CLOUD_API_KEY"] +gt_cloud_knowledge_base_id = os.environ["GRIPTAPE_CLOUD_KB_ID"] + +vector_store_driver = GriptapeCloudKnowledgeBaseVectorStoreDriver( + api_key=gt_cloud_api_key, knowledge_base_id=gt_cloud_knowledge_base_id +) + +results = vector_store_driver.query(query="What is griptape?") + +values = [r.to_artifact().value for r in results] + +print("\n\n".join(values)) diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_3.py b/docs/griptape-framework/drivers/src/vector_store_drivers_3.py new file mode 100644 index 000000000..d2cfc8142 --- /dev/null +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_3.py @@ -0,0 +1,30 @@ +import os + +from griptape.artifacts import ErrorArtifact +from griptape.drivers import OpenAiEmbeddingDriver, PineconeVectorStoreDriver +from griptape.loaders import WebLoader + +# Initialize an Embedding Driver +embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) + +vector_store_driver = PineconeVectorStoreDriver( + api_key=os.environ["PINECONE_API_KEY"], + environment=os.environ["PINECONE_ENVIRONMENT"], + index_name=os.environ["PINECONE_INDEX_NAME"], + embedding_driver=embedding_driver, +) + +# Load Artifacts from the web +artifacts = WebLoader(max_tokens=100).load("https://www.griptape.ai") + +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) + +# Upsert Artifacts into the Vector Store Driver +[vector_store_driver.upsert_text_artifact(a, namespace="griptape") for a in artifacts] + +results = vector_store_driver.query(query="What is griptape?") + +values = [r.to_artifact().value for r in results] + +print("\n\n".join(values)) diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_4.py b/docs/griptape-framework/drivers/src/vector_store_drivers_4.py new file mode 100644 index 000000000..fe35f1ff5 --- /dev/null +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_4.py @@ -0,0 +1,39 @@ +import os + +from griptape.artifacts import ErrorArtifact +from griptape.drivers import MarqoVectorStoreDriver, OpenAiChatPromptDriver, OpenAiEmbeddingDriver +from griptape.loaders import WebLoader + +# Initialize an Embedding Driver +embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) +prompt_driver = OpenAiChatPromptDriver(model="gpt-3.5-turbo") + +# 
Define the namespace +namespace = "griptape-ai" + +# Initialize the Vector Store Driver +vector_store_driver = MarqoVectorStoreDriver( + api_key=os.environ["MARQO_API_KEY"], + url=os.environ["MARQO_URL"], + index=os.environ["MARQO_INDEX_NAME"], + embedding_driver=embedding_driver, +) + +# Load Artifacts from the web +artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") + +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) + +# Upsert Artifacts into the Vector Store Driver +vector_store_driver.upsert_text_artifacts( + { + "griptape": artifacts, + } +) + +results = vector_store_driver.query(query="What is griptape?") + +values = [r.to_artifact().value for r in results] + +print("\n\n".join(values)) diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_5.py b/docs/griptape-framework/drivers/src/vector_store_drivers_5.py new file mode 100644 index 000000000..867195a48 --- /dev/null +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_5.py @@ -0,0 +1,45 @@ +import os + +from griptape.artifacts.error_artifact import ErrorArtifact +from griptape.drivers import MongoDbAtlasVectorStoreDriver, OpenAiEmbeddingDriver +from griptape.loaders import WebLoader + +# Initialize an Embedding Driver +embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) + +host = os.environ["MONGODB_HOST"] +username = os.environ["MONGODB_USERNAME"] +password = os.environ["MONGODB_PASSWORD"] +database_name = os.environ["MONGODB_DATABASE_NAME"] +collection_name = os.environ["MONGODB_COLLECTION_NAME"] +index_name = os.environ["MONGODB_INDEX_NAME"] +vector_path = os.environ["MONGODB_VECTOR_PATH"] + +# Initialize the Vector Store Driver +vector_store_driver = MongoDbAtlasVectorStoreDriver( + connection_string=f"mongodb+srv://{username}:{password}@{host}/{database_name}", + database_name=database_name, + collection_name=collection_name, + embedding_driver=embedding_driver, + index_name=index_name, + vector_path=vector_path, +) + +# Load Artifacts from the web +artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") + +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) + +# Upsert Artifacts into the Vector Store Driver +vector_store_driver.upsert_text_artifacts( + { + "griptape": artifacts, + } +) + +results = vector_store_driver.query(query="What is griptape?") + +values = [r.to_artifact().value for r in results] + +print("\n\n".join(values)) diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_6.py b/docs/griptape-framework/drivers/src/vector_store_drivers_6.py new file mode 100644 index 000000000..9c5c9cab6 --- /dev/null +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_6.py @@ -0,0 +1,45 @@ +import os + +from griptape.artifacts.error_artifact import ErrorArtifact +from griptape.drivers import AzureMongoDbVectorStoreDriver, OpenAiEmbeddingDriver +from griptape.loaders import WebLoader + +# Initialize an Embedding Driver +embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) + +azure_host = os.environ["AZURE_MONGODB_HOST"] +username = os.environ["AZURE_MONGODB_USERNAME"] +password = os.environ["AZURE_MONGODB_PASSWORD"] +database_name = os.environ["AZURE_MONGODB_DATABASE_NAME"] +collection_name = os.environ["AZURE_MONGODB_COLLECTION_NAME"] +index_name = os.environ["AZURE_MONGODB_INDEX_NAME"] +vector_path = os.environ["AZURE_MONGODB_VECTOR_PATH"] + +# Initialize the Vector Store Driver +vector_store_driver = AzureMongoDbVectorStoreDriver( + 
connection_string=f"mongodb+srv://{username}:{password}@{azure_host}/{database_name}?tls=true&authMechanism=SCRAM-SHA-256&retrywrites=false&maxIdleTimeMS=120000", + database_name=database_name, + collection_name=collection_name, + embedding_driver=embedding_driver, + index_name=index_name, + vector_path=vector_path, +) + +# Load Artifacts from the web +artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") + +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) + +# Upsert Artifacts into the Vector Store Driver +vector_store_driver.upsert_text_artifacts( + { + "griptape": artifacts, + } +) + +results = vector_store_driver.query(query="What is griptape?") + +values = [r.to_artifact().value for r in results] + +print("\n\n".join(values)) diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_7.py b/docs/griptape-framework/drivers/src/vector_store_drivers_7.py new file mode 100644 index 000000000..c08d9ff3b --- /dev/null +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_7.py @@ -0,0 +1,35 @@ +import os + +from griptape.artifacts.error_artifact import ErrorArtifact +from griptape.drivers import OpenAiEmbeddingDriver, RedisVectorStoreDriver +from griptape.loaders import WebLoader + +# Initialize an Embedding Driver +embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) + +vector_store_driver = RedisVectorStoreDriver( + host=os.environ["REDIS_HOST"], + port=int(os.environ["REDIS_PORT"]), + password=os.environ["REDIS_PASSWORD"], + index=os.environ["REDIS_INDEX"], + embedding_driver=embedding_driver, +) + +# Load Artifacts from the web +artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") + +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) + +# Upsert Artifacts into the Vector Store Driver +vector_store_driver.upsert_text_artifacts( + { + "griptape": artifacts, + } +) + +results = vector_store_driver.query(query="What is griptape?") + +values = [r.to_artifact().value for r in results] + +print("\n\n".join(values)) diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_8.py b/docs/griptape-framework/drivers/src/vector_store_drivers_8.py new file mode 100644 index 000000000..a57363eb3 --- /dev/null +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_8.py @@ -0,0 +1,36 @@ +import os + +import boto3 + +from griptape.artifacts.error_artifact import ErrorArtifact +from griptape.drivers import AmazonOpenSearchVectorStoreDriver, OpenAiEmbeddingDriver +from griptape.loaders import WebLoader + +# Initialize an Embedding Driver +embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) + +vector_store_driver = AmazonOpenSearchVectorStoreDriver( + host=os.environ["AMAZON_OPENSEARCH_HOST"], + index_name=os.environ["AMAZON_OPENSEARCH_INDEX_NAME"], + session=boto3.Session(), + embedding_driver=embedding_driver, +) + +# Load Artifacts from the web +artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") + +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) + +# Upsert Artifacts into the Vector Store Driver +vector_store_driver.upsert_text_artifacts( + { + "griptape": artifacts, + } +) + +results = vector_store_driver.query(query="What is griptape?") + +values = [r.to_artifact().value for r in results] + +print("\n\n".join(values)) diff --git a/docs/griptape-framework/drivers/src/vector_store_drivers_9.py b/docs/griptape-framework/drivers/src/vector_store_drivers_9.py new file mode 100644 index 
000000000..c5aface63 --- /dev/null +++ b/docs/griptape-framework/drivers/src/vector_store_drivers_9.py @@ -0,0 +1,42 @@ +import os + +from griptape.artifacts.error_artifact import ErrorArtifact +from griptape.drivers import OpenAiEmbeddingDriver, PgVectorVectorStoreDriver +from griptape.loaders import WebLoader + +# Initialize an Embedding Driver. +embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) + +db_user = os.environ["POSTGRES_USER"] +db_pass = os.environ["POSTGRES_PASSWORD"] +db_host = os.environ["POSTGRES_HOST"] +db_port = os.environ["POSTGRES_PORT"] +db_name = os.environ["POSTGRES_DB"] +db_connection_string = f"postgresql://{db_user}:{db_pass}@{db_host}:{db_port}/{db_name}" +vector_store_driver = PgVectorVectorStoreDriver( + connection_string=db_connection_string, + embedding_driver=embedding_driver, + table_name="griptape_vectors", +) + +# Install required Postgres extensions and create database schema. +vector_store_driver.setup() + +# Load Artifacts from the web +artifacts = WebLoader().load("https://www.griptape.ai") + +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) + +# Upsert Artifacts into the Vector Store Driver +vector_store_driver.upsert_text_artifacts( + { + "griptape": artifacts, + } +) + +results = vector_store_driver.query(query="What is griptape?") + +values = [r.to_artifact().value for r in results] + +print("\n\n".join(values)) diff --git a/docs/griptape-framework/drivers/src/web_scraper_drivers_1.py b/docs/griptape-framework/drivers/src/web_scraper_drivers_1.py new file mode 100644 index 000000000..8b9e9dbd1 --- /dev/null +++ b/docs/griptape-framework/drivers/src/web_scraper_drivers_1.py @@ -0,0 +1,20 @@ +import os + +from griptape.drivers import ProxyWebScraperDriver + +query_params = [ + "markdown_response=true", + "js_render=false", + "premium_proxy=false", +] +proxy_url = f'http://{os.environ["ZENROWS_API_KEY"]}:{"&".join(query_params)}@proxy.zenrows.com:8001' + +driver = ProxyWebScraperDriver( + proxies={ + "http": proxy_url, + "https": proxy_url, + }, + params={"verify": False}, +) + +driver.scrape_url("https://griptape.ai") diff --git a/docs/griptape-framework/drivers/src/web_scraper_drivers_2.py b/docs/griptape-framework/drivers/src/web_scraper_drivers_2.py new file mode 100644 index 000000000..db7c1db5a --- /dev/null +++ b/docs/griptape-framework/drivers/src/web_scraper_drivers_2.py @@ -0,0 +1,5 @@ +from griptape.drivers import MarkdownifyWebScraperDriver + +driver = MarkdownifyWebScraperDriver() + +driver.scrape_url("https://griptape.ai") diff --git a/docs/griptape-framework/drivers/src/web_scraper_drivers_3.py b/docs/griptape-framework/drivers/src/web_scraper_drivers_3.py new file mode 100644 index 000000000..aafa465f8 --- /dev/null +++ b/docs/griptape-framework/drivers/src/web_scraper_drivers_3.py @@ -0,0 +1,14 @@ +from griptape.drivers import MarkdownifyWebScraperDriver +from griptape.loaders import WebLoader +from griptape.structures import Agent +from griptape.tools import WebScraperTool + +agent = Agent( + tools=[ + WebScraperTool( + web_loader=WebLoader(web_scraper_driver=MarkdownifyWebScraperDriver(timeout=1000)), + off_prompt=False, + ), + ], +) +agent.run("List all email addresses on griptape.ai in a flat numbered markdown list.") diff --git a/docs/griptape-framework/drivers/src/web_scraper_drivers_4.py b/docs/griptape-framework/drivers/src/web_scraper_drivers_4.py new file mode 100644 index 000000000..50d0a73a4 --- /dev/null +++ 
b/docs/griptape-framework/drivers/src/web_scraper_drivers_4.py @@ -0,0 +1,5 @@ +from griptape.drivers import TrafilaturaWebScraperDriver + +driver = TrafilaturaWebScraperDriver() + +driver.scrape_url("https://griptape.ai") diff --git a/docs/griptape-framework/drivers/src/web_search_drivers_1.py b/docs/griptape-framework/drivers/src/web_search_drivers_1.py new file mode 100644 index 000000000..1affc64df --- /dev/null +++ b/docs/griptape-framework/drivers/src/web_search_drivers_1.py @@ -0,0 +1,10 @@ +import os + +from griptape.drivers import GoogleWebSearchDriver + +driver = GoogleWebSearchDriver( + api_key=os.environ["GOOGLE_API_KEY"], + search_id=os.environ["GOOGLE_API_SEARCH_ID"], +) + +driver.search("griptape ai") diff --git a/docs/griptape-framework/drivers/src/web_search_drivers_2.py b/docs/griptape-framework/drivers/src/web_search_drivers_2.py new file mode 100644 index 000000000..abe80a2e0 --- /dev/null +++ b/docs/griptape-framework/drivers/src/web_search_drivers_2.py @@ -0,0 +1,18 @@ +import os + +from griptape.drivers import GoogleWebSearchDriver +from griptape.structures import Agent +from griptape.tools import PromptSummaryTool, WebSearchTool + +agent = Agent( + tools=[ + WebSearchTool( + web_search_driver=GoogleWebSearchDriver( + api_key=os.environ["GOOGLE_API_KEY"], + search_id=os.environ["GOOGLE_API_SEARCH_ID"], + ), + ), + PromptSummaryTool(off_prompt=False), + ], +) +agent.run("Give me some websites with information about AI frameworks.") diff --git a/docs/griptape-framework/drivers/src/web_search_drivers_3.py b/docs/griptape-framework/drivers/src/web_search_drivers_3.py new file mode 100644 index 000000000..3ce932649 --- /dev/null +++ b/docs/griptape-framework/drivers/src/web_search_drivers_3.py @@ -0,0 +1,5 @@ +from griptape.drivers import DuckDuckGoWebSearchDriver + +driver = DuckDuckGoWebSearchDriver() + +driver.search("griptape ai") diff --git a/docs/griptape-framework/drivers/structure-run-drivers.md b/docs/griptape-framework/drivers/structure-run-drivers.md index 5916d6e20..1f57ff57e 100644 --- a/docs/griptape-framework/drivers/structure-run-drivers.md +++ b/docs/griptape-framework/drivers/structure-run-drivers.md @@ -5,7 +5,7 @@ search: ## Overview Structure Run Drivers can be used to run Griptape Structures in a variety of runtime environments. -When combined with the [Structure Run Task](../../griptape-framework/structures/tasks.md#structure-run-task) or [Structure Run Client](../../griptape-tools/official-tools/structure-run-client.md) you can create complex, multi-agent pipelines that span multiple runtime environments. +When combined with the [Structure Run Task](../../griptape-framework/structures/tasks.md#structure-run-task) or [Structure Run Tool](../../griptape-tools/official-tools/structure-run-tool.md) you can create complex, multi-agent pipelines that span multiple runtime environments. ## Structure Run Drivers @@ -14,50 +14,7 @@ When combined with the [Structure Run Task](../../griptape-framework/structures/ The [LocalStructureRunDriver](../../reference/griptape/drivers/structure_run/local_structure_run_driver.md) is used to run Griptape Structures in the same runtime environment as the code that is running the Structure. 
```python -from griptape.drivers import LocalStructureRunDriver -from griptape.rules import Rule -from griptape.structures import Agent, Pipeline -from griptape.tasks import StructureRunTask - -def build_joke_teller(): - joke_teller = Agent( - rules=[ - Rule( - value="You are very funny.", - ) - ], - ) - - return joke_teller - -def build_joke_rewriter(): - joke_rewriter = Agent( - rules=[ - Rule( - value="You are the editor of a joke book. But you only speak in riddles", - ) - ], - ) - - return joke_rewriter - -joke_coordinator = Pipeline( - tasks=[ - StructureRunTask( - driver=LocalStructureRunDriver( - structure_factory_fn=build_joke_teller, - ), - ), - StructureRunTask( - ("Rewrite this joke: {{ parent_output }}",), - driver=LocalStructureRunDriver( - structure_factory_fn=build_joke_rewriter, - ), - ), - ] -) - -joke_coordinator.run("Tell me a joke") +--8<-- "docs/griptape-framework/drivers/src/structure_run_drivers_1.py" ``` ### Griptape Cloud @@ -66,45 +23,5 @@ The [GriptapeCloudStructureRunDriver](../../reference/griptape/drivers/structure ```python -import os - -from griptape.drivers import GriptapeCloudStructureRunDriver, LocalStructureRunDriver -from griptape.structures import Pipeline, Agent -from griptape.rules import Rule -from griptape.tasks import StructureRunTask - -base_url = os.environ["GRIPTAPE_CLOUD_BASE_URL"] -api_key = os.environ["GRIPTAPE_CLOUD_API_KEY"] -structure_id = os.environ["GRIPTAPE_CLOUD_STRUCTURE_ID"] - - -pipeline = Pipeline( - tasks=[ - StructureRunTask( - ("Think of a question related to Retrieval Augmented Generation.",), - driver=LocalStructureRunDriver( - structure_factory_fn=lambda: Agent( - rules=[ - Rule( - value="You are an expert in Retrieval Augmented Generation.", - ), - Rule( - value="Only output your answer, no other information.", - ), - ] - ) - ), - ), - StructureRunTask( - ("{{ parent_output }}",), - driver=GriptapeCloudStructureRunDriver( - base_url=base_url, - api_key=api_key, - structure_id=structure_id, - ), - ), - ] -) - -pipeline.run() +--8<-- "docs/griptape-framework/drivers/src/structure_run_drivers_2.py" ``` diff --git a/docs/griptape-framework/drivers/text-to-speech-drivers.md b/docs/griptape-framework/drivers/text-to-speech-drivers.md index 8680449dd..c5455914e 100644 --- a/docs/griptape-framework/drivers/text-to-speech-drivers.md +++ b/docs/griptape-framework/drivers/text-to-speech-drivers.md @@ -19,27 +19,7 @@ The [Eleven Labs Text to Speech Driver](../../reference/griptape/drivers/text_to This driver requires the `drivers-text-to-speech-elevenlabs` [extra](../index.md#extras). ```python -import os - -from griptape.drivers import ElevenLabsTextToSpeechDriver -from griptape.engines import TextToSpeechEngine -from griptape.tools.text_to_speech_client.tool import TextToSpeechClient -from griptape.structures import Agent - - -driver = ElevenLabsTextToSpeechDriver( - api_key=os.getenv("ELEVEN_LABS_API_KEY"), - model="eleven_multilingual_v2", - voice="Matilda", -) - -tool = TextToSpeechClient( - engine=TextToSpeechEngine( - text_to_speech_driver=driver, - ), -) - -Agent(tools=[tool]).run("Generate audio from this text: 'Hello, world!'") +--8<-- "docs/griptape-framework/drivers/src/text_to_speech_drivers_1.py" ``` ## OpenAI @@ -47,18 +27,5 @@ Agent(tools=[tool]).run("Generate audio from this text: 'Hello, world!'") The [OpenAI Text to Speech Driver](../../reference/griptape/drivers/text_to_speech/openai_text_to_speech_driver.md) provides support for text-to-speech models hosted by OpenAI. 
This Driver supports configurations specific to OpenAI, like voice selection and output format. ```python -from griptape.drivers import OpenAiTextToSpeechDriver -from griptape.engines import TextToSpeechEngine -from griptape.tools.text_to_speech_client.tool import TextToSpeechClient -from griptape.structures import Agent - -driver = OpenAiTextToSpeechDriver() - -tool = TextToSpeechClient( - engine=TextToSpeechEngine( - text_to_speech_driver=driver, - ), -) - -Agent(tools=[tool]).run("Generate audio from this text: 'Hello, world!'") +--8<-- "docs/griptape-framework/drivers/src/text_to_speech_drivers_2.py" ``` diff --git a/docs/griptape-framework/drivers/vector-store-drivers.md b/docs/griptape-framework/drivers/vector-store-drivers.md index 2b76119b3..7cca64a46 100644 --- a/docs/griptape-framework/drivers/vector-store-drivers.md +++ b/docs/griptape-framework/drivers/vector-store-drivers.md @@ -28,27 +28,7 @@ Each Vector Store Driver takes a [BaseEmbeddingDriver](../../reference/griptape/ The [LocalVectorStoreDriver](../../reference/griptape/drivers/vector/local_vector_store_driver.md) can be used to load and query data from memory. Here is a complete example of how the Driver can be used to load a webpage into the Driver and query it later: ```python -import os -from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver -from griptape.loaders import WebLoader - - -# Initialize an Embedding Driver -embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) - -vector_store_driver = LocalVectorStoreDriver(embedding_driver=embedding_driver) - -# Load Artifacts from the web -artifacts = WebLoader(max_tokens=100).load("https://www.griptape.ai") - -# Upsert Artifacts into the Vector Store Driver -[vector_store_driver.upsert_text_artifact(a, namespace="griptape") for a in artifacts] - -results = vector_store_driver.query(query="What is griptape?") - -values = [r.to_artifact().value for r in results] - -print("\n\n".join(values)) +--8<-- "docs/griptape-framework/drivers/src/vector_store_drivers_1.py" ``` ### Griptape Cloud Knowledge Base @@ -56,21 +36,7 @@ print("\n\n".join(values)) The [GriptapeCloudKnowledgeBaseVectorStoreDriver](../../reference/griptape/drivers/vector/griptape_cloud_knowledge_base_vector_store_driver.md) can be used to query data from a Griptape Cloud Knowledge Base. Loading into Knowledge Bases is not supported at this time, only querying. 
Here is a complete example of how the Driver can be used to query an existing Knowledge Base: ```python -import os -from griptape.drivers import GriptapeCloudKnowledgeBaseVectorStoreDriver - - -# Initialize environment variables -gt_cloud_api_key = os.environ["GRIPTAPE_CLOUD_API_KEY"] -gt_cloud_knowledge_base_id = os.environ["GRIPTAPE_CLOUD_KB_ID"] - -vector_store_driver = GriptapeCloudKnowledgeBaseVectorStoreDriver(api_key=gt_cloud_api_key, knowledge_base_id=gt_cloud_knowledge_base_id) - -results = vector_store_driver.query(query="What is griptape?") - -values = [r.to_artifact().value for r in results] - -print("\n\n".join(values)) +--8<-- "docs/griptape-framework/drivers/src/vector_store_drivers_2.py" ``` ### Pinecone @@ -83,32 +49,7 @@ The [PineconeVectorStoreDriver](../../reference/griptape/drivers/vector/pinecone Here is an example of how the Driver can be used to load and query information in a Pinecone cluster: ```python -import os -from griptape.drivers import PineconeVectorStoreDriver, OpenAiEmbeddingDriver -from griptape.loaders import WebLoader - - -# Initialize an Embedding Driver -embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) - -vector_store_driver = PineconeVectorStoreDriver( - api_key=os.environ["PINECONE_API_KEY"], - environment=os.environ["PINECONE_ENVIRONMENT"], - index_name=os.environ["PINECONE_INDEX_NAME"], - embedding_driver=embedding_driver, -) - -# Load Artifacts from the web -artifacts = WebLoader(max_tokens=100).load("https://www.griptape.ai") - -# Upsert Artifacts into the Vector Store Driver -[vector_store_driver.upsert_text_artifact(a, namespace="griptape") for a in artifacts] - -results = vector_store_driver.query(query="What is griptape?") - -values = [r.to_artifact().value for r in results] - -print("\n\n".join(values)) +--8<-- "docs/griptape-framework/drivers/src/vector_store_drivers_3.py" ``` ### Marqo @@ -121,40 +62,7 @@ The [MarqoVectorStoreDriver](../../reference/griptape/drivers/vector/marqo_vecto Here is an example of how the Driver can be used to load and query information in a Marqo cluster: ```python -import os -from griptape.drivers import MarqoVectorStoreDriver, OpenAiEmbeddingDriver, OpenAiChatPromptDriver -from griptape.loaders import WebLoader - -# Initialize an Embedding Driver -embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) -prompt_driver = OpenAiChatPromptDriver(model="gpt-3.5-turbo") - -# Define the namespace -namespace = 'griptape-ai' - -# Initialize the Vector Store Driver -vector_store_driver = MarqoVectorStoreDriver( - api_key=os.environ["MARQO_API_KEY"], - url=os.environ["MARQO_URL"], - index=os.environ["MARQO_INDEX_NAME"], - embedding_driver=embedding_driver, -) - -# Load Artifacts from the web -artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") - -# Upsert Artifacts into the Vector Store Driver -vector_store_driver.upsert_text_artifacts( - { - "griptape": artifacts, - } -) - -results = vector_store_driver.query(query="What is griptape?") - -values = [r.to_artifact().value for r in results] - -print("\n\n".join(values)) +--8<-- "docs/griptape-framework/drivers/src/vector_store_drivers_4.py" ``` ### Mongodb Atlas @@ -167,46 +75,7 @@ The [MongodbAtlasVectorStoreDriver](../../reference/griptape/drivers/vector/mong Here is an example of how the Driver can be used to load and query information in a MongoDb Atlas Cluster: ```python -from griptape.drivers import MongoDbAtlasVectorStoreDriver, OpenAiEmbeddingDriver -from griptape.loaders import WebLoader 
-import os - -# Initialize an Embedding Driver -embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) - -host = os.environ["MONGODB_HOST"] -username = os.environ["MONGODB_USERNAME"] -password = os.environ["MONGODB_PASSWORD"] -database_name = os.environ["MONGODB_DATABASE_NAME"] -collection_name = os.environ[ "MONGODB_COLLECTION_NAME"] -index_name = os.environ["MONGODB_INDEX_NAME"] -vector_path = os.environ["MONGODB_VECTOR_PATH"] - -# Initialize the Vector Store Driver -vector_store_driver = MongoDbAtlasVectorStoreDriver( - connection_string=f"mongodb+srv://{username}:{password}@{host}/{database_name}", - database_name=database_name, - collection_name=collection_name, - embedding_driver=embedding_driver, - index_name=index_name, - vector_path=vector_path, -) - -# Load Artifacts from the web -artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") - -# Upsert Artifacts into the Vector Store Driver -vector_store_driver.upsert_text_artifacts( - { - "griptape": artifacts, - } -) - -results = vector_store_driver.query(query="What is griptape?") - -values = [r.to_artifact().value for r in results] - -print("\n\n".join(values)) +--8<-- "docs/griptape-framework/drivers/src/vector_store_drivers_5.py" ``` The format for creating a vector index should look similar to the following: @@ -238,46 +107,7 @@ The [AzureMongoDbVectorStoreDriver](../../reference/griptape/drivers/vector/azur Here is an example of how the Driver can be used to load and query information in an Azure CosmosDb MongoDb vCore database. It is very similar to the Driver for [MongoDb Atlas](#mongodb-atlas): ```python -from griptape.drivers import AzureMongoDbVectorStoreDriver, OpenAiEmbeddingDriver -from griptape.loaders import WebLoader -import os - -# Initialize an Embedding Driver -embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) - -azure_host = os.environ["AZURE_MONGODB_HOST"] -username = os.environ["AZURE_MONGODB_USERNAME"] -password = os.environ["AZURE_MONGODB_PASSWORD"] -database_name = os.environ["AZURE_MONGODB_DATABASE_NAME"] -collection_name = os.environ["AZURE_MONGODB_COLLECTION_NAME"] -index_name = os.environ["AZURE_MONGODB_INDEX_NAME"] -vector_path = os.environ["AZURE_MONGODB_VECTOR_PATH"] - -# Initialize the Vector Store Driver -vector_store_driver = AzureMongoDbVectorStoreDriver( - connection_string=f"mongodb+srv://{username}:{password}@{azure_host}/{database_name}?tls=true&authMechanism=SCRAM-SHA-256&retrywrites=false&maxIdleTimeMS=120000", - database_name=database_name, - collection_name=collection_name, - embedding_driver=embedding_driver, - index_name=index_name, - vector_path=vector_path, -) - -# Load Artifacts from the web -artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") - -# Upsert Artifacts into the Vector Store Driver -vector_store_driver.upsert_text_artifacts( - { - "griptape": artifacts, - } -) - -results = vector_store_driver.query(query="What is griptape?") - -values = [r.to_artifact().value for r in results] - -print("\n\n".join(values)) +--8<-- "docs/griptape-framework/drivers/src/vector_store_drivers_6.py" ``` ### Redis @@ -290,37 +120,7 @@ The [RedisVectorStoreDriver](../../reference/griptape/drivers/vector/redis_vecto Here is an example of how the Driver can be used to load and query information in a Redis Cluster: ```python -import os -from griptape.drivers import RedisVectorStoreDriver, OpenAiEmbeddingDriver -from griptape.loaders import WebLoader -import numpy as np # Assuming you'd use numpy to create a dummy 
vector for the sake of example. - -# Initialize an Embedding Driver -embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) - -vector_store_driver = RedisVectorStoreDriver( - host=os.environ["REDIS_HOST"], - port=os.environ["REDIS_PORT"], - password=os.environ["REDIS_PASSWORD"], - index=os.environ["REDIS_INDEX"], - embedding_driver=embedding_driver, -) - -# Load Artifacts from the web -artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") - -# Upsert Artifacts into the Vector Store Driver -vector_store_driver.upsert_text_artifacts( - { - "griptape": artifacts, - } -) - -results = vector_store_driver.query(query="What is griptape?") - -values = [r.to_artifact().value for r in results] - -print("\n\n".join(values)) +--8<-- "docs/griptape-framework/drivers/src/vector_store_drivers_7.py" ``` The format for creating a vector index should be similar to the following: @@ -338,36 +138,7 @@ The [OpenSearchVectorStoreDriver](../../reference/griptape/drivers/vector/opense Here is an example of how the Driver can be used to load and query information in an OpenSearch Cluster: ```python -import os -import boto3 -from griptape.drivers import AmazonOpenSearchVectorStoreDriver, OpenAiEmbeddingDriver -from griptape.loaders import WebLoader - -# Initialize an Embedding Driver -embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) - -vector_store_driver = AmazonOpenSearchVectorStoreDriver( - host=os.environ["AMAZON_OPENSEARCH_HOST"], - index_name=os.environ["AMAZON_OPENSEARCH_INDEX_NAME"], - session=boto3.Session(), - embedding_driver=embedding_driver, -) - -# Load Artifacts from the web -artifacts = WebLoader(max_tokens=200).load("https://www.griptape.ai") - -# Upsert Artifacts into the Vector Store Driver -vector_store_driver.upsert_text_artifacts( - { - "griptape": artifacts, - } -) - -results = vector_store_driver.query(query="What is griptape?") - -values = [r.to_artifact().value for r in results] - -print("\n\n".join(values)) +--8<-- "docs/griptape-framework/drivers/src/vector_store_drivers_8.py" ``` The body mappings for creating a vector index should look similar to the following: @@ -393,43 +164,7 @@ The [PGVectorVectorStoreDriver](../../reference/griptape/drivers/vector/pgvector Here is an example of how the Driver can be used to load and query information in a Postgres database: ```python -import os -from griptape.drivers import PgVectorVectorStoreDriver, OpenAiEmbeddingDriver -from griptape.loaders import WebLoader - -# Initialize an Embedding Driver. -embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) - -db_user = os.environ["POSTGRES_USER"] -db_pass = os.environ["POSTGRES_PASSWORD"] -db_host = os.environ["POSTGRES_HOST"] -db_port = os.environ["POSTGRES_PORT"] -db_name = os.environ["POSTGRES_DB"] -db_connection_string = f"postgresql://{db_user}:{db_pass}@{db_host}:{db_port}/{db_name}" -vector_store_driver = PgVectorVectorStoreDriver( - connection_string=db_connection_string, - embedding_driver=embedding_driver, - table_name="griptape_vectors", -) - -# Install required Postgres extensions and create database schema. 
-vector_store_driver.setup() - -# Load Artifacts from the web -artifacts = WebLoader().load("https://www.griptape.ai") - -# Upsert Artifacts into the Vector Store Driver -vector_store_driver.upsert_text_artifacts( - { - "griptape": artifacts, - } -) - -results = vector_store_driver.query(query="What is griptape?") - -values = [r.to_artifact().value for r in results] - -print("\n\n".join(values)) +--8<-- "docs/griptape-framework/drivers/src/vector_store_drivers_9.py" ``` ### Qdrant @@ -442,43 +177,18 @@ The QdrantVectorStoreDriver supports the [Qdrant vector database](https://qdrant Here is an example of how the Driver can be used to query information in a Qdrant collection: ```python -import os -from griptape.drivers import QdrantVectorStoreDriver, OpenAiEmbeddingDriver -from griptape.loaders import WebLoader - -# Set up environment variables -host = os.environ["QDRANT_CLUSTER_ENDPOINT"] -api_key = os.environ["QDRANT_CLUSTER_API_KEY"] - -# Initialize an Embedding Driver. -embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"]) - -vector_store_driver = QdrantVectorStoreDriver( - url=host, - collection_name="griptape", - content_payload_key="content", - embedding_driver=embedding_driver, - api_key=api_key, -) - -# Load Artifacts from the web -artifacts = WebLoader().load("https://www.griptape.ai") - -# Recreate Qdrant collection -vector_store_driver.client.recreate_collection( - collection_name=vector_store_driver.collection_name, - vectors_config={ - "size": 1536, - "distance": vector_store_driver.distance - }, -) +--8<-- "docs/griptape-framework/drivers/src/vector_store_drivers_10.py" +``` -# Upsert Artifacts into the Vector Store Driver -[vector_store_driver.upsert_text_artifact(a, namespace="griptape") for a in artifacts] +### Astra DB -results = vector_store_driver.query(query="What is griptape?") +!!! info + This Driver requires the `drivers-vector-astra-db` [extra](../index.md#extras). + +The AstraDbVectorStoreDriver supports [DataStax Astra DB](https://www.datastax.com/products/datastax-astra). 
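As a rough sketch of what the referenced snippet covers, usage follows the same upsert/query pattern as the other Vector Store Drivers. The constructor parameters and environment variable names below are assumptions for illustration; `vector_store_drivers_11.py` is the authoritative example.

```python
import os

from griptape.artifacts import TextArtifact
from griptape.drivers import AstraDbVectorStoreDriver, OpenAiEmbeddingDriver

# Initialize an Embedding Driver.
embedding_driver = OpenAiEmbeddingDriver(api_key=os.environ["OPENAI_API_KEY"])

# The parameter names below are assumed; consult the referenced snippet for the exact signature.
vector_store_driver = AstraDbVectorStoreDriver(
    embedding_driver=embedding_driver,
    api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
    token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
    collection_name="griptape_vectors",
)

# Upsert and query work the same way as with the other Vector Store Drivers.
vector_store_driver.upsert_text_artifact(TextArtifact("Griptape builds AI-powered applications."), namespace="griptape")

results = vector_store_driver.query(query="What is griptape?")

print("\n\n".join(r.to_artifact().value for r in results))
```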
-values = [r.to_artifact().value for r in results] +The following example shows how to store vector entries and query the information using the driver: -print("\n\n".join(values)) +```python +--8<-- "docs/griptape-framework/drivers/src/vector_store_drivers_11.py" ``` diff --git a/docs/griptape-framework/drivers/web-scraper-drivers.md b/docs/griptape-framework/drivers/web-scraper-drivers.md index afebe6622..7bfb7be99 100644 --- a/docs/griptape-framework/drivers/web-scraper-drivers.md +++ b/docs/griptape-framework/drivers/web-scraper-drivers.md @@ -18,27 +18,7 @@ The [ProxyWebScraperDriver](../../reference/griptape/drivers/web_scraper/proxy_w Example using `ProxyWebScraperDriver` directly: ```python -import os -from griptape.drivers import ProxyWebScraperDriver - -query_params = [ - "markdown_response=true", - "js_render=false", - "premium_proxy=false", -] -proxy_url = f'http://{os.environ["ZENROWS_API_KEY"]}:{"&".join(query_params)}@proxy.zenrows.com:8001' - -driver = ProxyWebScraperDriver( - proxies={ - "http": proxy_url, - "https": proxy_url, - }, - params={ - "verify": False - } -) - -driver.scrape_url("https://griptape.ai") +--8<-- "docs/griptape-framework/drivers/src/web_scraper_drivers_1.py" ``` ### Markdownify @@ -72,33 +52,13 @@ The [MarkdownifyWebScraperDriver](../../reference/griptape/drivers/web_scraper/m Example using `MarkdownifyWebScraperDriver` directly: ```python -from griptape.drivers import MarkdownifyWebScraperDriver - -driver = MarkdownifyWebScraperDriver() - -driver.scrape_url("https://griptape.ai") +--8<-- "docs/griptape-framework/drivers/src/web_scraper_drivers_2.py" ``` Example of using `MarkdownifyWebScraperDriver` with an agent: ```python -from griptape.drivers import MarkdownifyWebScraperDriver -from griptape.loaders import WebLoader -from griptape.tools import TaskMemoryClient, WebScraper -from griptape.structures import Agent - -agent = Agent( - tools=[ - WebScraper( - web_loader=WebLoader( - web_scraper_driver=MarkdownifyWebScraperDriver(timeout=1000) - ), - off_prompt=True, - ), - TaskMemoryClient(off_prompt=False), - ], -) -agent.run("List all email addresses on griptape.ai in a flat numbered markdown list.") +--8<-- "docs/griptape-framework/drivers/src/web_scraper_drivers_3.py" ``` ### Trafilatura @@ -111,9 +71,5 @@ The [TrafilaturaWebScraperDriver](../../reference/griptape/drivers/web_scraper/t Example of using `TrafilaturaWebScraperDriver` directly: ```python -from griptape.drivers import TrafilaturaWebScraperDriver - -driver = TrafilaturaWebScraperDriver() - -driver.scrape_url("https://griptape.ai") +--8<-- "docs/griptape-framework/drivers/src/web_scraper_drivers_4.py" ``` diff --git a/docs/griptape-framework/drivers/web-search-drivers.md b/docs/griptape-framework/drivers/web-search-drivers.md index 4b8692214..b2400fe28 100644 --- a/docs/griptape-framework/drivers/web-search-drivers.md +++ b/docs/griptape-framework/drivers/web-search-drivers.md @@ -18,37 +18,13 @@ The [GoogleWebSearchDriver](../../reference/griptape/drivers/web_search/google_w Example using `GoogleWebSearchDriver` directly: ```python -import os -from griptape.drivers import GoogleWebSearchDriver - -driver = GoogleWebSearchDriver( - api_key=os.environ["GOOGLE_API_KEY"], - search_id=os.environ["GOOGLE_API_SEARCH_ID"], -) - -driver.search("griptape ai") +--8<-- "docs/griptape-framework/drivers/src/web_search_drivers_1.py" ``` Example of using `GoogleWebSearchDriver` with an agent: ```python -import os -from griptape.drivers import GoogleWebSearchDriver -from griptape.tools import 
TaskMemoryClient, WebSearch -from griptape.structures import Agent - -agent = Agent( - tools=[ - WebSearch( - web_search_driver=GoogleWebSearchDriver( - api_key=os.environ["GOOGLE_API_KEY"], - search_id=os.environ["GOOGLE_API_SEARCH_ID"], - ), - ), - TaskMemoryClient(off_prompt=False), - ], -) -agent.run("Give me some websites with information about AI frameworks.") +--8<-- "docs/griptape-framework/drivers/src/web_search_drivers_2.py" ``` ### DuckDuckGo @@ -61,9 +37,5 @@ The [DuckDuckGoWebSearchDriver](../../reference/griptape/drivers/web_search/duck Example of using `DuckDuckGoWebSearchDriver` directly: ```python -from griptape.drivers import DuckDuckGoWebSearchDriver - -driver = DuckDuckGoWebSearchDriver() - -driver.search("griptape ai") +--8<-- "docs/griptape-framework/drivers/src/web_search_drivers_3.py" ``` diff --git a/docs/griptape-framework/engines/audio-engines.md b/docs/griptape-framework/engines/audio-engines.md index 8f637dca0..b5b0b24a6 100644 --- a/docs/griptape-framework/engines/audio-engines.md +++ b/docs/griptape-framework/engines/audio-engines.md @@ -12,25 +12,7 @@ search: This Engine facilitates synthesizing speech from text inputs. ```python -import os - -from griptape.drivers import ElevenLabsTextToSpeechDriver -from griptape.engines import TextToSpeechEngine - - -driver = ElevenLabsTextToSpeechDriver( - api_key=os.getenv("ELEVEN_LABS_API_KEY"), - model="eleven_multilingual_v2", - voice="Laura", -) - -engine = TextToSpeechEngine( - text_to_speech_driver=driver, -) - -engine.run( - prompts=["Hello, world!"], -) +--8<-- "docs/griptape-framework/engines/src/audio_engines_1.py" ``` ### Audio Transcription @@ -38,20 +20,5 @@ engine.run( The [Audio Transcription Engine](../../reference/griptape/engines/audio/audio_transcription_engine.md) facilitates transcribing speech from audio inputs. ```python -from griptape.drivers import OpenAiAudioTranscriptionDriver -from griptape.engines import AudioTranscriptionEngine -from griptape.loaders import AudioLoader -from griptape.utils import load_file - - -driver = OpenAiAudioTranscriptionDriver( - model="whisper-1" -) - -engine = AudioTranscriptionEngine( - audio_transcription_driver=driver, -) - -audio_artifact = AudioLoader().load(load_file("tests/resources/sentences.wav")) -engine.run(audio_artifact) +--8<-- "docs/griptape-framework/engines/src/audio_engines_2.py" ``` diff --git a/docs/griptape-framework/engines/extraction-engines.md b/docs/griptape-framework/engines/extraction-engines.md index 496560968..b971e63cc 100644 --- a/docs/griptape-framework/engines/extraction-engines.md +++ b/docs/griptape-framework/engines/extraction-engines.md @@ -16,26 +16,7 @@ The CSV Extraction Engine is designed specifically for extracting data from CSV- The CSV Extraction Engine requires the `column_names` parameter for specifying the columns to be extracted. ```python -from griptape.drivers import OpenAiChatPromptDriver -from griptape.engines import CsvExtractionEngine - -# Initialize the CsvExtractionEngine instance -csv_engine = CsvExtractionEngine( - prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo"), -) - -# Define some unstructured data -sample_text = """ -Alice, 28, lives in New York. -Bob, 35 lives in California. -Charlie is 40 and lives in Texas. 
-""" - -# Extract CSV rows using the engine -result = csv_engine.extract(sample_text, column_names=["name", "age", "location"]) - -for row in result.value: - print(row.to_text()) +--8<-- "docs/griptape-framework/engines/src/extraction_engines_1.py" ``` ``` name,age,location @@ -52,31 +33,7 @@ The JSON Extraction Engine is tailored for extracting data from JSON-formatted c The JSON Extraction Engine requires the `template_schema` parameter for specifying the structure to be extracted. ```python -from schema import Schema - -from griptape.drivers import OpenAiChatPromptDriver -from griptape.engines import JsonExtractionEngine - -json_engine = JsonExtractionEngine( - prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo"), -) - -# Define some unstructured data -sample_json_text = """ -Alice (Age 28) lives in New York. -Bob (Age 35) lives in California. -""" - -# Define a schema for extraction -user_schema = Schema( - {"users": [{"name": str, "age": int, "location": str}]} -).json_schema("UserSchema") - -# Extract data using the engine -result = json_engine.extract(sample_json_text, template_schema=user_schema) - -for artifact in result.value: - print(artifact.value) +--8<-- "docs/griptape-framework/engines/src/extraction_engines_2.py" ``` ``` {'name': 'Alice', 'age': 28, 'location': 'New York'} diff --git a/docs/griptape-framework/engines/image-generation-engines.md b/docs/griptape-framework/engines/image-generation-engines.md index 600600060..6a8f039aa 100644 --- a/docs/griptape-framework/engines/image-generation-engines.md +++ b/docs/griptape-framework/engines/image-generation-engines.md @@ -17,31 +17,7 @@ In the following example, rulesets are provided to the Engine's `run()` method c See the [documentation for your Driver](../drivers/image-generation-drivers.md) to determine if it supports Negative Rulesets. ```python -from griptape.engines import PromptImageGenerationEngine -from griptape.drivers import AmazonBedrockImageGenerationDriver, \ - BedrockStableDiffusionImageGenerationModelDriver -from griptape.rules import Ruleset, Rule - - -# Create a driver configured to use Stable Diffusion via Bedrock. -driver = AmazonBedrockImageGenerationDriver( - image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), - model="stability.stable-diffusion-xl-v1", -) - -# Create an engine configured to use the driver. -engine = PromptImageGenerationEngine( - image_generation_driver=driver, -) - -positive_ruleset = Ruleset(name="positive rules", rules=[Rule("artistic"), Rule("watercolor")]) -negative_ruleset = Ruleset(name="negative rules", rules=[Rule("blurry"), Rule("photograph")]) - -engine.run( - prompts=["A dog riding a skateboard"], - rulesets=[positive_ruleset], - negative_rulesets=[negative_ruleset], -) +--8<-- "docs/griptape-framework/engines/src/image_generation_engines_1.py" ``` ### Prompt Image @@ -49,25 +25,7 @@ engine.run( This Engine facilitates generating images from text prompts. ```python -from griptape.engines import PromptImageGenerationEngine -from griptape.drivers import AmazonBedrockImageGenerationDriver, \ - BedrockStableDiffusionImageGenerationModelDriver - - -# Create a driver configured to use Stable Diffusion via Bedrock. -driver = AmazonBedrockImageGenerationDriver( - image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), - model="stability.stable-diffusion-xl-v1", -) - -# Create an engine configured to use the driver. 
-engine = PromptImageGenerationEngine( - image_generation_driver=driver, -) - -engine.run( - prompts=["A watercolor painting of a dog riding a skateboard"], -) +--8<-- "docs/griptape-framework/engines/src/image_generation_engines_2.py" ``` ### Variation @@ -75,29 +33,7 @@ engine.run( This Engine facilitates generating variations of an input image according to a text prompt. The input image is used as a reference for the model's generation. ```python -from griptape.engines import VariationImageGenerationEngine -from griptape.drivers import AmazonBedrockImageGenerationDriver, \ - BedrockStableDiffusionImageGenerationModelDriver -from griptape.loaders import ImageLoader - -# Create a driver configured to use Stable Diffusion via Bedrock. -driver = AmazonBedrockImageGenerationDriver( - image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), - model="stability.stable-diffusion-xl-v1", -) - -# Create an engine configured to use the driver. -engine = VariationImageGenerationEngine( - image_generation_driver=driver, -) - -with open("tests/resources/mountain.png", "rb") as f: - image_artifact = ImageLoader().load(f.read()) - -engine.run( - prompts=["A photo of a mountain landscape in winter"], - image=image_artifact, -) +--8<-- "docs/griptape-framework/engines/src/image_generation_engines_3.py" ``` ### Inpainting @@ -105,34 +41,7 @@ engine.run( This Engine facilitates inpainting, or modifying an input image according to a text prompt within the bounds of a mask defined by mask image. After inpainting, the area specified by the mask is replaced with the model's generation, while the rest of the input image remains the same. ```python -from griptape.engines import InpaintingImageGenerationEngine -from griptape.drivers import AmazonBedrockImageGenerationDriver, \ - BedrockStableDiffusionImageGenerationModelDriver -from griptape.loaders import ImageLoader - - -# Create a driver configured to use Stable Diffusion via Bedrock. -driver = AmazonBedrockImageGenerationDriver( - image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), - model="stability.stable-diffusion-xl-v1", -) - -# Create an engine configured to use the driver. -engine = InpaintingImageGenerationEngine( - image_generation_driver=driver, -) - -with open("tests/resources/mountain.png", "rb") as f: - image_artifact = ImageLoader().load(f.read()) - -with open("tests/resources/mountain-mask.png", "rb") as f: - mask_artifact = ImageLoader().load(f.read()) - -engine.run( - prompts=["A photo of a castle built into the side of a mountain"], - image=image_artifact, - mask=mask_artifact, -) +--8<-- "docs/griptape-framework/engines/src/image_generation_engines_4.py" ``` ### Outpainting @@ -140,31 +49,5 @@ engine.run( This Engine facilitates outpainting, or modifying an input image according to a text prompt outside the bounds of a mask defined by a mask image. After outpainting, the area of the input image specified by the mask remains the same, while the rest is replaced with the model's generation. ```python -from griptape.engines import OutpaintingImageGenerationEngine -from griptape.drivers import AmazonBedrockImageGenerationDriver, \ - BedrockStableDiffusionImageGenerationModelDriver -from griptape.loaders import ImageLoader - -# Create a driver configured to use Stable Diffusion via Bedrock. 
-driver = AmazonBedrockImageGenerationDriver( - image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), - model="stability.stable-diffusion-xl-v1", -) - -# Create an engine configured to use the driver. -engine = OutpaintingImageGenerationEngine( - image_generation_driver=driver, -) - -with open("tests/resources/mountain.png", "rb") as f: - image_artifact = ImageLoader().load(f.read()) - -with open("tests/resources/mountain-mask.png", "rb") as f: - mask_artifact = ImageLoader().load(f.read()) - -engine.run( - prompts=["A photo of a mountain shrouded in clouds"], - image=image_artifact, - mask=mask_artifact, -) +--8<-- "docs/griptape-framework/engines/src/image_generation_engines_5.py" ``` diff --git a/docs/griptape-framework/engines/image-query-engines.md b/docs/griptape-framework/engines/image-query-engines.md index 1db247cb7..3290a20f1 100644 --- a/docs/griptape-framework/engines/image-query-engines.md +++ b/docs/griptape-framework/engines/image-query-engines.md @@ -10,21 +10,5 @@ The [Image Query Engine](../../reference/griptape/engines/image_query/image_quer All Image Query Drivers default to a `max_tokens` of 256. You can tune this value based on your use case and the [Image Query Driver](../drivers/image-query-drivers.md) you are providing. ```python -from griptape.drivers import OpenAiImageQueryDriver -from griptape.engines import ImageQueryEngine -from griptape.loaders import ImageLoader - -driver = OpenAiImageQueryDriver( - model="gpt-4o", - max_tokens=256 -) - -engine = ImageQueryEngine( - image_query_driver=driver -) - -with open("tests/resources/mountain.png", "rb") as f: - image_artifact = ImageLoader().load(f.read()) - -engine.run("Describe the weather in the image", [image_artifact]) +--8<-- "docs/griptape-framework/engines/src/image_query_engines_1.py" ``` diff --git a/docs/griptape-framework/engines/rag-engines.md b/docs/griptape-framework/engines/rag-engines.md index dc09d6de9..ed71b69c0 100644 --- a/docs/griptape-framework/engines/rag-engines.md +++ b/docs/griptape-framework/engines/rag-engines.md @@ -8,70 +8,45 @@ search: !!! note This section is a work in progress. -`RagEngine` is an abstraction for implementing modular RAG pipelines. - -`RagContext` is a container object for passing around RAG context. +[Rag Engine](../../reference/griptape/engines/rag/index.md) is an abstraction for implementing modular retrieval augmented generation (RAG) pipelines. ### RAG Stages -- `QueryRagStage` is for parsing and expanding queries. -- `RetrievalRagStage` is for retrieving content. -- `ResponseRagStage` is for augmenting and generating outputs. + +`RagEngine`s consist of three _stages_: `QueryRagStage`, `RetrievalRagStage`, and `ResponseRagStage`. These stages are always executed sequentially. Each stage comprises multiple _modules_, which are executed in a customized manner. Due to this unique structure, `RagEngines` are not intended to replace [Workflows](../structures/workflows.md) or [Pipelines](../structures/pipelines.md). + + +- `QueryRagStage` is used for modifying user queries. +- `RetrievalRagStage` is used for retrieving and re-ranking text chunks. +- `ResponseRagStage` is used for generating responses. ### RAG Modules -#### Query +RAG modules are used to implement concrete actions in the RAG pipeline. `RagEngine` enables developers to easily add new modules to experiment with novel RAG strategies. -No modules implemented yet. +#### Query Modules -#### Retrieval +- `TranslateQueryRagModule` is for translating the query into another language. 
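For instance, a minimal sketch of wiring the translation module into a query stage (the full end-to-end example appears under Example below) looks like this; the prompt driver choice is an assumption:

```python
from griptape.drivers import OpenAiChatPromptDriver
from griptape.engines.rag.modules import TranslateQueryRagModule
from griptape.engines.rag.stages import QueryRagStage

# A query stage that rewrites incoming queries into English before retrieval.
query_stage = QueryRagStage(
    query_modules=[
        TranslateQueryRagModule(
            prompt_driver=OpenAiChatPromptDriver(model="gpt-4o"),
            language="english",
        )
    ]
)
```

The stage is then passed to `RagEngine(query_stage=...)` alongside the retrieval and response stages.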
+ +#### Retrieval Modules - `TextRetrievalRagModule` is for retrieving text chunks. - `TextLoaderRetrievalRagModule` is for retrieving data with text loaders in real time. - `TextChunksRerankRagModule` is for re-ranking retrieved results. -#### Response +#### Response Modules - `MetadataBeforeResponseRagModule` is for appending metadata. - `RulesetsBeforeResponseRagModule` is for appending rulesets. - `PromptResponseRagModule` is for generating responses based on retrieved text chunks. - `TextChunksResponseRagModule` is for responding with retrieved text chunks. - `FootnotePromptResponseRagModule` is for responding with automatic footnotes from text chunk references. +### RAG Context + +`RagContext` is a container object for passing around queries, text chunks, module configs, and other metadata. `RagContext` is modified by modules when appropriate. Some modules support runtime config overrides through `RagContext.module_configs`. + ### Example +The following example shows a simple RAG pipeline that translates incoming queries into English, retrieves data from a local vector store, and generates a response: + ```python -from griptape.artifacts import TextArtifact -from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver, OpenAiChatPromptDriver -from griptape.engines.rag import RagEngine -from griptape.engines.rag.modules import VectorStoreRetrievalRagModule, PromptResponseRagModule -from griptape.engines.rag.stages import RetrievalRagStage, ResponseRagStage - -vector_store = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver()) - -artifacts = [ - TextArtifact("Griptape builds AI-powered applications that connect securely to your enterprise data and APIs."), - TextArtifact("Griptape Agents provide incredible power and flexibility when working with large language models.") -] -vector_store.upsert_text_artifacts({"griptape": artifacts}) - -engine = RagEngine( - retrieval_stage=RetrievalRagStage( - retrieval_modules=[ - VectorStoreRetrievalRagModule( - vector_store_driver=vector_store, - query_params={ - "namespace": "griptape", - "top_n": 20 - } - ) - ] - ), - response_stage=ResponseRagStage( - response_module=PromptResponseRagModule( - prompt_driver=OpenAiChatPromptDriver(model="gpt-4o") - ) - ) -) - -print( - engine.process_query("what are Griptape agents?").output.to_text() -) +--8<-- "docs/griptape-framework/engines/src/rag_engines_1.py" ``` diff --git a/griptape/tools/aws_iam_client/__init__.py b/docs/griptape-framework/engines/src/__init__.py similarity index 100% rename from griptape/tools/aws_iam_client/__init__.py rename to docs/griptape-framework/engines/src/__init__.py diff --git a/docs/griptape-framework/engines/src/audio_engines_1.py b/docs/griptape-framework/engines/src/audio_engines_1.py new file mode 100644 index 000000000..527300c09 --- /dev/null +++ b/docs/griptape-framework/engines/src/audio_engines_1.py @@ -0,0 +1,18 @@ +import os + +from griptape.drivers import ElevenLabsTextToSpeechDriver +from griptape.engines import TextToSpeechEngine + +driver = ElevenLabsTextToSpeechDriver( + api_key=os.environ["ELEVEN_LABS_API_KEY"], + model="eleven_multilingual_v2", + voice="Laura", +) + +engine = TextToSpeechEngine( + text_to_speech_driver=driver, +) + +engine.run( + prompts=["Hello, world!"], +) diff --git a/docs/griptape-framework/engines/src/audio_engines_2.py b/docs/griptape-framework/engines/src/audio_engines_2.py new file mode 100644 index 000000000..c04b466f8 --- /dev/null +++ b/docs/griptape-framework/engines/src/audio_engines_2.py @@ -0,0 
+1,13 @@ +from griptape.drivers import OpenAiAudioTranscriptionDriver +from griptape.engines import AudioTranscriptionEngine +from griptape.loaders import AudioLoader +from griptape.utils import load_file + +driver = OpenAiAudioTranscriptionDriver(model="whisper-1") + +engine = AudioTranscriptionEngine( + audio_transcription_driver=driver, +) + +audio_artifact = AudioLoader().load(load_file("tests/resources/sentences.wav")) +engine.run(audio_artifact) diff --git a/docs/griptape-framework/engines/src/extraction_engines_1.py b/docs/griptape-framework/engines/src/extraction_engines_1.py new file mode 100644 index 000000000..c681980f2 --- /dev/null +++ b/docs/griptape-framework/engines/src/extraction_engines_1.py @@ -0,0 +1,24 @@ +from griptape.artifacts import ListArtifact +from griptape.drivers import OpenAiChatPromptDriver +from griptape.engines import CsvExtractionEngine + +# Initialize the CsvExtractionEngine instance +csv_engine = CsvExtractionEngine( + prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo"), +) + +# Define some unstructured data +sample_text = """ +Alice, 28, lives in New York. +Bob, 35 lives in California. +Charlie is 40 and lives in Texas. +""" + +# Extract CSV rows using the engine +result = csv_engine.extract(sample_text, column_names=["name", "age", "location"]) + +if isinstance(result, ListArtifact): + for row in result.value: + print(row.to_text()) +else: + print(result.to_text()) diff --git a/docs/griptape-framework/engines/src/extraction_engines_2.py b/docs/griptape-framework/engines/src/extraction_engines_2.py new file mode 100644 index 000000000..d47bb48e5 --- /dev/null +++ b/docs/griptape-framework/engines/src/extraction_engines_2.py @@ -0,0 +1,27 @@ +from schema import Schema + +from griptape.artifacts.list_artifact import ListArtifact +from griptape.drivers import OpenAiChatPromptDriver +from griptape.engines import JsonExtractionEngine + +json_engine = JsonExtractionEngine( + prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo"), +) + +# Define some unstructured data +sample_json_text = """ +Alice (Age 28) lives in New York. +Bob (Age 35) lives in California. +""" + +# Define a schema for extraction +user_schema = Schema({"users": [{"name": str, "age": int, "location": str}]}).json_schema("UserSchema") + +# Extract data using the engine +result = json_engine.extract(sample_json_text, template_schema=user_schema) + +if isinstance(result, ListArtifact): + for artifact in result.value: + print(artifact.value) +else: + print(result.to_text()) diff --git a/docs/griptape-framework/engines/src/image_generation_engines_1.py b/docs/griptape-framework/engines/src/image_generation_engines_1.py new file mode 100644 index 000000000..5bc3d5fb5 --- /dev/null +++ b/docs/griptape-framework/engines/src/image_generation_engines_1.py @@ -0,0 +1,23 @@ +from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver +from griptape.engines import PromptImageGenerationEngine +from griptape.rules import Rule, Ruleset + +# Create a driver configured to use Stable Diffusion via Bedrock. +driver = AmazonBedrockImageGenerationDriver( + image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), + model="stability.stable-diffusion-xl-v1", +) + +# Create an engine configured to use the driver. 
+engine = PromptImageGenerationEngine( + image_generation_driver=driver, +) + +positive_ruleset = Ruleset(name="positive rules", rules=[Rule("artistic"), Rule("watercolor")]) +negative_ruleset = Ruleset(name="negative rules", rules=[Rule("blurry"), Rule("photograph")]) + +engine.run( + prompts=["A dog riding a skateboard"], + rulesets=[positive_ruleset], + negative_rulesets=[negative_ruleset], +) diff --git a/docs/griptape-framework/engines/src/image_generation_engines_2.py b/docs/griptape-framework/engines/src/image_generation_engines_2.py new file mode 100644 index 000000000..7a7daf6dc --- /dev/null +++ b/docs/griptape-framework/engines/src/image_generation_engines_2.py @@ -0,0 +1,17 @@ +from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver +from griptape.engines import PromptImageGenerationEngine + +# Create a driver configured to use Stable Diffusion via Bedrock. +driver = AmazonBedrockImageGenerationDriver( + image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), + model="stability.stable-diffusion-xl-v1", +) + +# Create an engine configured to use the driver. +engine = PromptImageGenerationEngine( + image_generation_driver=driver, +) + +engine.run( + prompts=["A watercolor painting of a dog riding a skateboard"], +) diff --git a/docs/griptape-framework/engines/src/image_generation_engines_3.py b/docs/griptape-framework/engines/src/image_generation_engines_3.py new file mode 100644 index 000000000..83822b1bc --- /dev/null +++ b/docs/griptape-framework/engines/src/image_generation_engines_3.py @@ -0,0 +1,23 @@ +from pathlib import Path + +from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver +from griptape.engines import VariationImageGenerationEngine +from griptape.loaders import ImageLoader + +# Create a driver configured to use Stable Diffusion via Bedrock. +driver = AmazonBedrockImageGenerationDriver( + image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), + model="stability.stable-diffusion-xl-v1", +) + +# Create an engine configured to use the driver. +engine = VariationImageGenerationEngine( + image_generation_driver=driver, +) + +image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) + +engine.run( + prompts=["A photo of a mountain landscape in winter"], + image=image_artifact, +) diff --git a/docs/griptape-framework/engines/src/image_generation_engines_4.py b/docs/griptape-framework/engines/src/image_generation_engines_4.py new file mode 100644 index 000000000..c258e1cce --- /dev/null +++ b/docs/griptape-framework/engines/src/image_generation_engines_4.py @@ -0,0 +1,26 @@ +from pathlib import Path + +from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver +from griptape.engines import InpaintingImageGenerationEngine +from griptape.loaders import ImageLoader + +# Create a driver configured to use Stable Diffusion via Bedrock. +driver = AmazonBedrockImageGenerationDriver( + image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), + model="stability.stable-diffusion-xl-v1", +) + +# Create an engine configured to use the driver. 
+engine = InpaintingImageGenerationEngine( + image_generation_driver=driver, +) + +image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) + +mask_artifact = ImageLoader().load(Path("tests/resources/mountain-mask.png").read_bytes()) + +engine.run( + prompts=["A photo of a castle built into the side of a mountain"], + image=image_artifact, + mask=mask_artifact, +) diff --git a/docs/griptape-framework/engines/src/image_generation_engines_5.py b/docs/griptape-framework/engines/src/image_generation_engines_5.py new file mode 100644 index 000000000..f91a48ec0 --- /dev/null +++ b/docs/griptape-framework/engines/src/image_generation_engines_5.py @@ -0,0 +1,26 @@ +from pathlib import Path + +from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver +from griptape.engines import OutpaintingImageGenerationEngine +from griptape.loaders import ImageLoader + +# Create a driver configured to use Stable Diffusion via Bedrock. +driver = AmazonBedrockImageGenerationDriver( + image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), + model="stability.stable-diffusion-xl-v1", +) + +# Create an engine configured to use the driver. +engine = OutpaintingImageGenerationEngine( + image_generation_driver=driver, +) + +image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) + +mask_artifact = ImageLoader().load(Path("tests/resources/mountain-mask.png").read_bytes()) + +engine.run( + prompts=["A photo of a mountain shrouded in clouds"], + image=image_artifact, + mask=mask_artifact, +) diff --git a/docs/griptape-framework/engines/src/image_query_engines_1.py b/docs/griptape-framework/engines/src/image_query_engines_1.py new file mode 100644 index 000000000..b0920392a --- /dev/null +++ b/docs/griptape-framework/engines/src/image_query_engines_1.py @@ -0,0 +1,13 @@ +from pathlib import Path + +from griptape.drivers import OpenAiImageQueryDriver +from griptape.engines import ImageQueryEngine +from griptape.loaders import ImageLoader + +driver = OpenAiImageQueryDriver(model="gpt-4o", max_tokens=256) + +engine = ImageQueryEngine(image_query_driver=driver) + +image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) + +engine.run("Describe the weather in the image", [image_artifact]) diff --git a/docs/griptape-framework/engines/src/rag_engines_1.py b/docs/griptape-framework/engines/src/rag_engines_1.py new file mode 100644 index 000000000..c257cd4df --- /dev/null +++ b/docs/griptape-framework/engines/src/rag_engines_1.py @@ -0,0 +1,47 @@ +from griptape.artifacts import ErrorArtifact +from griptape.drivers import LocalVectorStoreDriver, OpenAiChatPromptDriver, OpenAiEmbeddingDriver +from griptape.engines.rag import RagContext, RagEngine +from griptape.engines.rag.modules import PromptResponseRagModule, TranslateQueryRagModule, VectorStoreRetrievalRagModule +from griptape.engines.rag.stages import QueryRagStage, ResponseRagStage, RetrievalRagStage +from griptape.loaders import WebLoader +from griptape.rules import Rule, Ruleset + +prompt_driver = OpenAiChatPromptDriver(model="gpt-4o", temperature=0) + +vector_store = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver()) +artifacts = WebLoader(max_tokens=500).load("https://www.griptape.ai") + +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) + +vector_store.upsert_text_artifacts( + { + "griptape": artifacts, + } +) + +rag_engine = RagEngine( + 
query_stage=QueryRagStage(query_modules=[TranslateQueryRagModule(prompt_driver=prompt_driver, language="english")]), + retrieval_stage=RetrievalRagStage( + max_chunks=5, + retrieval_modules=[ + VectorStoreRetrievalRagModule( + name="MyAwesomeRetriever", vector_store_driver=vector_store, query_params={"top_n": 20} + ) + ], + ), + response_stage=ResponseRagStage( + response_modules=[ + PromptResponseRagModule( + prompt_driver=prompt_driver, rulesets=[Ruleset(name="persona", rules=[Rule("Talk like a pirate")])] + ) + ] + ), +) + +rag_context = RagContext( + query="¿Qué ofrecen los servicios en la nube de Griptape?", + module_configs={"MyAwesomeRetriever": {"query_params": {"namespace": "griptape"}}}, +) + +print(rag_engine.process(rag_context).outputs[0].to_text()) diff --git a/docs/griptape-framework/engines/src/summary_engines_1.py b/docs/griptape-framework/engines/src/summary_engines_1.py new file mode 100644 index 000000000..092665b37 --- /dev/null +++ b/docs/griptape-framework/engines/src/summary_engines_1.py @@ -0,0 +1,20 @@ +import requests + +from griptape.artifacts.error_artifact import ErrorArtifact +from griptape.drivers import OpenAiChatPromptDriver +from griptape.engines import PromptSummaryEngine +from griptape.loaders import PdfLoader + +response = requests.get("https://arxiv.org/pdf/1706.03762.pdf") +engine = PromptSummaryEngine( + prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo"), +) + +artifacts = PdfLoader().load(response.content) + +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) + +text = "\n\n".join([a.value for a in artifacts]) + +engine.summarize_text(text) diff --git a/docs/griptape-framework/engines/summary-engines.md b/docs/griptape-framework/engines/summary-engines.md index d699a2283..90c72e4dd 100644 --- a/docs/griptape-framework/engines/summary-engines.md +++ b/docs/griptape-framework/engines/summary-engines.md @@ -14,21 +14,5 @@ Used to summarize texts with LLMs. You can set a custom [prompt_driver](../../re Use the [summarize_artifacts](../../reference/griptape/engines/summary/prompt_summary_engine.md#griptape.engines.summary.prompt_summary_engine.PromptSummaryEngine.summarize_artifacts) method to summarize a list of artifacts or [summarize_text](../../reference/griptape/engines/summary/base_summary_engine.md#griptape.engines.summary.base_summary_engine.BaseSummaryEngine.summarize_text) to summarize an arbitrary string. ```python -import io -import requests - -from griptape.drivers import OpenAiChatPromptDriver -from griptape.engines import PromptSummaryEngine -from griptape.loaders import PdfLoader - -response = requests.get("https://arxiv.org/pdf/1706.03762.pdf") -engine = PromptSummaryEngine( - prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo"), -) - -artifacts = PdfLoader().load(response.content) - -text = "\n\n".join([a.value for a in artifacts]) - -engine.summarize_text(text) +--8<-- "docs/griptape-framework/engines/src/summary_engines_1.py" ``` diff --git a/docs/griptape-framework/index.md b/docs/griptape-framework/index.md index b5613b184..3dd294f0b 100644 --- a/docs/griptape-framework/index.md +++ b/docs/griptape-framework/index.md @@ -65,11 +65,7 @@ For a comprehensive list of extras, please refer to the `[tool.poetry.extras]` s With Griptape, you can create *structures*, such as [Agents](./structures/agents.md), [Pipelines](./structures/pipelines.md), and [Workflows](./structures/workflows.md), that are composed of different types of tasks. 
First, let's build a simple Agent that we can interact with through a chat based interface. ```python -from griptape.structures import Agent -from griptape.utils import Chat - -agent = Agent() -Chat(agent).start() +--8<-- "docs/griptape-framework/src/index_1.py" ``` Run this script in your IDE and you'll be presented with a `Q:` prompt where you can interact with your model. ``` @@ -89,29 +85,13 @@ Q: If you want to skip the chat interface and load an initial prompt, you can do so using the `.run()` method: ```python -from griptape.structures import Agent -from griptape.utils import Chat - -agent = Agent() -agent.run("write me a haiku about griptape") +--8<-- "docs/griptape-framework/src/index_2.py" ``` Agents on their own are fun, but let's add some capabilities to them using Griptape Tools. ### Build a Simple Agent with Tools ```python -from griptape.structures import Agent -from griptape.tools import Calculator - -calculator = Calculator() - -agent = Agent( - tools=[calculator] -) - -agent.run( - "what is 7^12" -) -print("Answer:", agent.output) +--8<-- "docs/griptape-framework/src/index_3.py" ``` Here is the chain of thought from the Agent. Notice where it realizes it can use the tool you just injected to do the calculation.[^1] [^1]: In some cases a model might be capable of basic arithmetic. For example, gpt-3.5 returns the correct numeric answer but in an odd format. @@ -123,7 +103,7 @@ Here is the chain of thought from the Agent. Notice where it realizes it can use Actions: [ { "tag": "call_RTRm7JLFV0F73dCVPmoWVJqO", - "name": "Calculator", + "name": "CalculatorTool", "path": "calculate", "input": { "values": { @@ -144,74 +124,79 @@ Answer: 13,841,287,201 Agents are great for getting started, but they are intentionally limited to a single task. Pipelines, however, allow us to define any number of tasks to run in sequence. Let's define a simple two-task Pipeline that uses tools and memory: ```python -from griptape.memory.structure import ConversationMemory -from griptape.structures import Pipeline -from griptape.tasks import ToolkitTask, PromptTask -from griptape.tools import WebScraper, FileManager, TaskMemoryClient - - -# Pipelines represent sequences of tasks. -pipeline = Pipeline( - conversation_memory=ConversationMemory() -) - -pipeline.add_tasks( - # Load up the first argument from `pipeline.run`. - ToolkitTask( - "{{ args[0] }}", - # Add tools for web scraping, and file management - tools=[WebScraper(off_prompt=True), FileManager(off_prompt=True), TaskMemoryClient(off_prompt=False)] - ), - # Augment `input` from the previous task. - PromptTask( - "Say the following in spanish: {{ parent_output }}" - ) -) - -pipeline.run( - "Load https://www.griptape.ai, summarize it, and store it in griptape.txt" -) +--8<-- "docs/griptape-framework/src/index_4.py" ``` ``` -[09/08/23 10:02:34] INFO ToolkitTask 3c1d2f4a49384873820a9a8cd8acc983 +[08/12/24 14:50:28] INFO ToolkitTask 19dcf6020968468a91aa8a93c2a3f645 Input: Load https://www.griptape.ai, summarize it, and store it in griptape.txt -[09/08/23 10:02:44] INFO Subtask 42fd56ba100e45688401c5ce32b79a33 - Thought: To complete this task, I need to first load the webpage using the WebScraper tool's get_content - activity. Then, I will summarize the content using the TaskMemory tool's summarize activity. Finally, I will - store the summarized content in a file named griptape.txt using the FileManager tool's save_file_to_disk - activity. 
- - Action: {"name": "WebScraper", "path": "get_content", "input": {"values": {"url": - "https://www.griptape.ai"}}} -[09/08/23 10:02:45] INFO Subtask 42fd56ba100e45688401c5ce32b79a33 - Response: Output of "WebScraper.get_content" was stored in memory with memory_name "TaskMemory" and - artifact_namespace "39ca67bbe26b4e1584193b87ed82170d" -[09/08/23 10:02:53] INFO Subtask 8023e3d257274df29065b22e736faca8 - Thought: Now that the webpage content is stored in memory, I can use the TaskMemory tool's summarize activity - to summarize the content. - Action: {"name": "TaskMemoryClient", "path": "summarize", "input": {"values": {"memory_name": "TaskMemory", "artifact_namespace": "39ca67bbe26b4e1584193b87ed82170d"}}} -[09/08/23 10:02:57] INFO Subtask 8023e3d257274df29065b22e736faca8 - Response: Griptape is an open source framework that allows developers to build and deploy AI applications - using large language models (LLMs). It provides the ability to create conversational and event-driven apps that - can securely access and manipulate data. The framework enforces structures for predictability and creativity, - allowing developers to easily transition between the two. Griptape Cloud is a managed platform for deploying and - managing AI apps. -[09/08/23 10:03:06] INFO Subtask 7baae700239943c18b5b6b21873f0e13 - Thought: Now that I have the summarized content, I can store it in a file named griptape.txt using the - FileManager tool's save_file_to_disk activity. - Action: {"name": "FileManager", "path": "save_file_to_disk", "input": {"values": - {"memory_name": "TaskMemory", "artifact_namespace": "39ca67bbe26b4e1584193b87ed82170d", "path": - "griptape.txt"}}} - INFO Subtask 7baae700239943c18b5b6b21873f0e13 - Response: saved successfully -[09/08/23 10:03:14] INFO ToolkitTask 3c1d2f4a49384873820a9a8cd8acc983 - Output: The summarized content of the webpage https://www.griptape.ai has been successfully stored in the file - named griptape.txt. - INFO PromptTask 8635925ff23b46f28a740105bd11ca8f - Input: Say the following in spanish: The summarized content of the webpage https://www.griptape.ai has been - successfully stored in the file named griptape.txt. -[09/08/23 10:03:18] INFO PromptTask 8635925ff23b46f28a740105bd11ca8f - Output: El contenido resumido de la página web https://www.griptape.ai se ha almacenado con éxito en el archivo - llamado griptape.txt. +[08/12/24 14:50:30] INFO Subtask a685799379c5421b91768353fc219939 + Actions: [ + { + "tag": "call_YL5Ozd9WUtag4ykR5Agm12Ce", + "name": "WebScraperTool", + "path": "get_content", + "input": { + "values": { + "url": "https://www.griptape.ai" + } + } + } + ] +[08/12/24 14:50:31] INFO Subtask a685799379c5421b91768353fc219939 + Response: Output of "WebScraperTool.get_content" was stored in memory with memory_name "TaskMemory" and artifact_namespace + "6be3a2e0494841fda966b98bec9ffccb" +[08/12/24 14:50:33] INFO Subtask 1cf0c19843aa4fada5745c4a82eb4237 + Actions: [ + { + "tag": "call_ElTYTPeocOU62I0VjzRqmfoF", + "name": "PromptSummaryTool", + "path": "summarize", + "input": { + "values": { + "summary": { + "memory_name": "TaskMemory", + "artifact_namespace": "6be3a2e0494841fda966b98bec9ffccb" + } + } + } + } + ] +[08/12/24 14:50:35] INFO Subtask 1cf0c19843aa4fada5745c4a82eb4237 + Response: Griptape offers a comprehensive solution for building, deploying, and scaling AI applications in the cloud. It provides developers + with a framework and cloud services to create retrieval-driven AI-powered applications. 
The Griptape Framework allows developers to build + business logic using Python, ensuring better security, performance, and cost-efficiency. It simplifies the creation of Gen AI Agents, Systems of + Agents, Pipelines, Workflows, and RAG implementations without needing extensive knowledge of Gen AI or Prompt Engineering. + + Griptape Cloud handles infrastructure management, offering services like ETL pipelines for data preparation, Retrieval as a Service (RAG) for + generating answers and summaries, and a Structure Runtime (RUN) for building AI agents and workflows. This enables seamless scaling and + integration with client applications, catering to custom projects, turnkey SaaS offerings, and finished apps. +[08/12/24 14:50:38] INFO Subtask aaaeca1a089844d4915d065deb3c00cf + Actions: [ + { + "tag": "call_eKvIUIw45aRYKDBpT1gGKc9b", + "name": "FileManagerTool", + "path": "save_content_to_file", + "input": { + "values": { + "path": "griptape.txt", + "content": "Griptape offers a comprehensive solution for building, deploying, and scaling AI applications in the cloud. It provides + developers with a framework and cloud services to create retrieval-driven AI-powered applications. The Griptape Framework allows developers to + build business logic using Python, ensuring better security, performance, and cost-efficiency. It simplifies the creation of Gen AI Agents, + Systems of Agents, Pipelines, Workflows, and RAG implementations without needing extensive knowledge of Gen AI or Prompt + Engineering.\n\nGriptape Cloud handles infrastructure management, offering services like ETL pipelines for data preparation, Retrieval as a + Service (RAG) for generating answers and summaries, and a Structure Runtime (RUN) for building AI agents and workflows. This enables seamless + scaling and integration with client applications, catering to custom projects, turnkey SaaS offerings, and finished apps." + } + } + } + ] + INFO Subtask aaaeca1a089844d4915d065deb3c00cf + Response: Successfully saved file +[08/12/24 14:50:39] INFO ToolkitTask 19dcf6020968468a91aa8a93c2a3f645 + Output: The content from https://www.griptape.ai has been summarized and stored in griptape.txt. + INFO PromptTask dbbb38f144f445db896dc12854f17ad3 + Input: Say the following in spanish: The content from https://www.griptape.ai has been summarized and stored in griptape.txt. +[08/12/24 14:50:42] INFO PromptTask dbbb38f144f445db896dc12854f17ad3 + Output: El contenido de https://www.griptape.ai ha sido resumido y almacenado en griptape.txt. ``` diff --git a/docs/griptape-framework/misc/events.md b/docs/griptape-framework/misc/events.md index 1f50fd6d0..3c4181aee 100644 --- a/docs/griptape-framework/misc/events.md +++ b/docs/griptape-framework/misc/events.md @@ -5,7 +5,7 @@ search: ## Overview -You can use [EventListener](../../reference/griptape/events/event_listener.md)s to listen for events during a Structure's execution. +You can configure the global [EventBus](../../reference/griptape/events/event_bus.md) with [EventListener](../../reference/griptape/events/event_listener.md)s to listen for various framework events. See [Event Listener Drivers](../drivers/event-listener-drivers.md) for examples on forwarding events to external services. 
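At its simplest, a listener is registered on the global `EventBus` before running a Structure. The sketch below mirrors the `events_1.py` and `events_2.py` snippets added later in this patch:

```python
from griptape.events import BaseEvent, EventBus, EventListener
from griptape.structures import Agent


def on_event(event: BaseEvent) -> None:
    # Print the class of every event emitted while the Agent runs.
    print(event.__class__)


# Listeners added to the global EventBus apply to all Structures.
EventBus.add_event_listeners([EventListener(on_event)])

agent = Agent()
agent.run("tell me about griptape")
```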
## Specific Event Types @@ -13,40 +13,7 @@ See [Event Listener Drivers](../drivers/event-listener-drivers.md) for examples You can listen to specific event types: ```python -from griptape.structures import Agent -from griptape.events import ( - BaseEvent, - StartTaskEvent, - FinishTaskEvent, - StartActionsSubtaskEvent, - FinishActionsSubtaskEvent, - StartPromptEvent, - FinishPromptEvent, - EventListener, -) - - -def handler(event: BaseEvent): - print(event.__class__) - - -agent = Agent( - event_listeners=[ - EventListener( - handler, - event_types=[ - StartTaskEvent, - FinishTaskEvent, - StartActionsSubtaskEvent, - FinishActionsSubtaskEvent, - StartPromptEvent, - FinishPromptEvent, - ], - ) - ] -) - -agent.run("tell me about griptape") +--8<-- "docs/griptape-framework/misc/src/events_1.py" ``` ``` @@ -68,26 +35,7 @@ agent.run("tell me about griptape") Or listen to all events: ```python -from griptape.structures import Agent -from griptape.events import BaseEvent, EventListener - - -def handler1(event: BaseEvent): - print("Handler 1", event.__class__) - - -def handler2(event: BaseEvent): - print("Handler 2", event.__class__) - - -agent = Agent( - event_listeners=[ - EventListener(handler1), - EventListener(handler2), - ] -) - -agent.run("tell me about griptape") +--8<-- "docs/griptape-framework/misc/src/events_2.py" ``` ``` @@ -131,52 +79,14 @@ Handler 2 None: + print(event.__class__) + + +EventBus.add_event_listeners( + [ + EventListener( + handler, + event_types=[ + StartTaskEvent, + FinishTaskEvent, + StartActionsSubtaskEvent, + FinishActionsSubtaskEvent, + StartPromptEvent, + FinishPromptEvent, + ], + ) + ] +) + +agent = Agent() + +agent.run("tell me about griptape") diff --git a/docs/griptape-framework/misc/src/events_2.py b/docs/griptape-framework/misc/src/events_2.py new file mode 100644 index 000000000..7c3a967fe --- /dev/null +++ b/docs/griptape-framework/misc/src/events_2.py @@ -0,0 +1,22 @@ +from griptape.events import BaseEvent, EventBus, EventListener +from griptape.structures import Agent + + +def handler1(event: BaseEvent) -> None: + print("Handler 1", event.__class__) + + +def handler2(event: BaseEvent) -> None: + print("Handler 2", event.__class__) + + +EventBus.add_event_listeners( + [ + EventListener(handler1), + EventListener(handler2), + ] +) + +agent = Agent() + +agent.run("tell me about griptape") diff --git a/docs/griptape-framework/misc/src/events_3.py b/docs/griptape-framework/misc/src/events_3.py new file mode 100644 index 000000000..7adac812f --- /dev/null +++ b/docs/griptape-framework/misc/src/events_3.py @@ -0,0 +1,27 @@ +from typing import cast + +from griptape.drivers import OpenAiChatPromptDriver +from griptape.events import CompletionChunkEvent, EventBus, EventListener +from griptape.structures import Pipeline +from griptape.tasks import ToolkitTask +from griptape.tools import PromptSummaryTool, WebScraperTool + +EventBus.add_event_listeners( + [ + EventListener( + lambda e: print(cast(CompletionChunkEvent, e).token, end="", flush=True), + event_types=[CompletionChunkEvent], + ) + ] +) + +pipeline = Pipeline() +pipeline.add_tasks( + ToolkitTask( + "Based on https://griptape.ai, tell me what griptape is.", + prompt_driver=OpenAiChatPromptDriver(model="gpt-4o", stream=True), + tools=[WebScraperTool(off_prompt=True), PromptSummaryTool(off_prompt=False)], + ) +) + +pipeline.run() diff --git a/docs/griptape-framework/misc/src/events_4.py b/docs/griptape-framework/misc/src/events_4.py new file mode 100644 index 000000000..eba11b07a --- /dev/null +++ 
b/docs/griptape-framework/misc/src/events_4.py @@ -0,0 +1,22 @@ +import logging + +from griptape.configs import Defaults +from griptape.structures import Agent +from griptape.tools import PromptSummaryTool, WebScraperTool +from griptape.utils import Stream + +# Hide Griptape's usual output +logging.getLogger(Defaults.logging_config.logger_name).setLevel(logging.ERROR) + +agent = Agent( + input="Based on https://griptape.ai, tell me what griptape is.", + tools=[ + PromptSummaryTool(off_prompt=True), + WebScraperTool(off_prompt=False), + ], + stream=True, +) + + +for artifact in Stream(agent).run(): + print(artifact.value, end="", flush=True) diff --git a/docs/griptape-framework/misc/src/events_5.py b/docs/griptape-framework/misc/src/events_5.py new file mode 100644 index 000000000..65bdcec4d --- /dev/null +++ b/docs/griptape-framework/misc/src/events_5.py @@ -0,0 +1,27 @@ +from griptape import utils +from griptape.events import BaseEvent, EventBus, EventListener, FinishPromptEvent +from griptape.structures import Agent + +token_counter = utils.TokenCounter() + + +def count_tokens(e: BaseEvent) -> None: + if isinstance(e, FinishPromptEvent) and e.output_token_count is not None: + token_counter.add_tokens(e.output_token_count) + + +EventBus.add_event_listeners( + [ + EventListener( + count_tokens, + event_types=[FinishPromptEvent], + ) + ] +) + + +agent = Agent() + +agent.run("tell me about large language models") + +print(f"total tokens: {token_counter.tokens}") diff --git a/docs/griptape-framework/misc/src/events_6.py b/docs/griptape-framework/misc/src/events_6.py new file mode 100644 index 000000000..25934442a --- /dev/null +++ b/docs/griptape-framework/misc/src/events_6.py @@ -0,0 +1,16 @@ +from griptape.events import BaseEvent, EventBus, EventListener, StartPromptEvent +from griptape.structures import Agent + +EventBus.add_event_listeners([EventListener(handler=lambda e: print(e), event_types=[StartPromptEvent])]) + + +def handler(event: BaseEvent) -> None: + if isinstance(event, StartPromptEvent): + print("Prompt Stack Messages:") + for message in event.prompt_stack.messages: + print(f"{message.role}: {message.content}") + + +agent = Agent() + +agent.run("Write me a poem.") diff --git a/docs/griptape-framework/misc/src/tokenizers_1.py b/docs/griptape-framework/misc/src/tokenizers_1.py new file mode 100644 index 000000000..c3351c4b9 --- /dev/null +++ b/docs/griptape-framework/misc/src/tokenizers_1.py @@ -0,0 +1,7 @@ +from griptape.tokenizers import OpenAiTokenizer + +tokenizer = OpenAiTokenizer(model="gpt-4o") + +print(tokenizer.count_tokens("Hello world!")) +print(tokenizer.count_input_tokens_left("Hello world!")) +print(tokenizer.count_output_tokens_left("Hello world!")) diff --git a/docs/griptape-framework/misc/src/tokenizers_2.py b/docs/griptape-framework/misc/src/tokenizers_2.py new file mode 100644 index 000000000..037548b1b --- /dev/null +++ b/docs/griptape-framework/misc/src/tokenizers_2.py @@ -0,0 +1,11 @@ +import os + +from cohere import Client + +from griptape.tokenizers import CohereTokenizer + +tokenizer = CohereTokenizer(model="command", client=Client(os.environ["COHERE_API_KEY"])) + +print(tokenizer.count_tokens("Hello world!")) +print(tokenizer.count_input_tokens_left("Hello world!")) +print(tokenizer.count_output_tokens_left("Hello world!")) diff --git a/docs/griptape-framework/misc/src/tokenizers_3.py b/docs/griptape-framework/misc/src/tokenizers_3.py new file mode 100644 index 000000000..636d5c08f --- /dev/null +++ b/docs/griptape-framework/misc/src/tokenizers_3.py @@ 
-0,0 +1,7 @@ +from griptape.tokenizers import AnthropicTokenizer + +tokenizer = AnthropicTokenizer(model="claude-3-opus-20240229") + +print(tokenizer.count_tokens("Hello world!")) +print(tokenizer.count_input_tokens_left("Hello world!")) +print(tokenizer.count_output_tokens_left("Hello world!")) diff --git a/docs/griptape-framework/misc/src/tokenizers_4.py b/docs/griptape-framework/misc/src/tokenizers_4.py new file mode 100644 index 000000000..04347fe93 --- /dev/null +++ b/docs/griptape-framework/misc/src/tokenizers_4.py @@ -0,0 +1,9 @@ +import os + +from griptape.tokenizers import GoogleTokenizer + +tokenizer = GoogleTokenizer(model="gemini-pro", api_key=os.environ["GOOGLE_API_KEY"]) + +print(tokenizer.count_tokens("Hello world!")) +print(tokenizer.count_input_tokens_left("Hello world!")) +print(tokenizer.count_output_tokens_left("Hello world!")) diff --git a/docs/griptape-framework/misc/src/tokenizers_5.py b/docs/griptape-framework/misc/src/tokenizers_5.py new file mode 100644 index 000000000..3a10e8dc5 --- /dev/null +++ b/docs/griptape-framework/misc/src/tokenizers_5.py @@ -0,0 +1,10 @@ +from griptape.tokenizers import HuggingFaceTokenizer + +tokenizer = HuggingFaceTokenizer( + model="sentence-transformers/all-MiniLM-L6-v2", + max_output_tokens=512, +) + +print(tokenizer.count_tokens("Hello world!")) +print(tokenizer.count_input_tokens_left("Hello world!")) +print(tokenizer.count_output_tokens_left("Hello world!")) diff --git a/docs/griptape-framework/misc/src/tokenizers_6.py b/docs/griptape-framework/misc/src/tokenizers_6.py new file mode 100644 index 000000000..12e9137dc --- /dev/null +++ b/docs/griptape-framework/misc/src/tokenizers_6.py @@ -0,0 +1,7 @@ +from griptape.tokenizers import AmazonBedrockTokenizer + +tokenizer = AmazonBedrockTokenizer(model="amazon.titan-text-express-v1") + +print(tokenizer.count_tokens("Hello world!")) +print(tokenizer.count_input_tokens_left("Hello world!")) +print(tokenizer.count_output_tokens_left("Hello world!")) diff --git a/docs/griptape-framework/misc/src/tokenizers_7.py b/docs/griptape-framework/misc/src/tokenizers_7.py new file mode 100644 index 000000000..faac73c9e --- /dev/null +++ b/docs/griptape-framework/misc/src/tokenizers_7.py @@ -0,0 +1,7 @@ +from griptape.tokenizers import SimpleTokenizer + +tokenizer = SimpleTokenizer(max_input_tokens=1024, max_output_tokens=1024, characters_per_token=6) + +print(tokenizer.count_tokens("Hello world!")) +print(tokenizer.count_input_tokens_left("Hello world!")) +print(tokenizer.count_output_tokens_left("Hello world!")) diff --git a/docs/griptape-framework/misc/tokenizers.md b/docs/griptape-framework/misc/tokenizers.md index 1211987a9..f820d55a9 100644 --- a/docs/griptape-framework/misc/tokenizers.md +++ b/docs/griptape-framework/misc/tokenizers.md @@ -15,95 +15,39 @@ Tokenizers are a low level abstraction that you will rarely interact with direct ### OpenAI ```python -from griptape.tokenizers import OpenAiTokenizer - - -tokenizer = OpenAiTokenizer(model="gpt-4o") - -print(tokenizer.count_tokens("Hello world!")) -print(tokenizer.count_input_tokens_left("Hello world!")) -print(tokenizer.count_output_tokens_left("Hello world!")) +--8<-- "docs/griptape-framework/misc/src/tokenizers_1.py" ``` ### Cohere ```python -import os -from cohere import Client -from griptape.tokenizers import CohereTokenizer - - -tokenizer = CohereTokenizer( - model="command", client=Client(os.environ["COHERE_API_KEY"]) -) - -print(tokenizer.count_tokens("Hello world!")) -print(tokenizer.count_input_tokens_left("Hello world!")) 
-print(tokenizer.count_output_tokens_left("Hello world!")) +--8<-- "docs/griptape-framework/misc/src/tokenizers_2.py" ``` ### Anthropic ```python -from griptape.tokenizers import AnthropicTokenizer - - -tokenizer = AnthropicTokenizer(model="claude-3-opus-20240229") - -print(tokenizer.count_tokens("Hello world!")) -print(tokenizer.count_input_tokens_left("Hello world!")) -print(tokenizer.count_output_tokens_left("Hello world!")) +--8<-- "docs/griptape-framework/misc/src/tokenizers_3.py" ``` ### Google ```python -import os -from griptape.tokenizers import GoogleTokenizer - -tokenizer = GoogleTokenizer(model="gemini-pro", api_key=os.environ["GOOGLE_API_KEY"]) - -print(tokenizer.count_tokens("Hello world!")) -print(tokenizer.count_input_tokens_left("Hello world!")) -print(tokenizer.count_output_tokens_left("Hello world!")) +--8<-- "docs/griptape-framework/misc/src/tokenizers_4.py" ``` ### Hugging Face ```python -from transformers import AutoTokenizer -from griptape.tokenizers import HuggingFaceTokenizer - - -tokenizer = HuggingFaceTokenizer( - model="sentence-transformers/all-MiniLM-L6-v2", - max_output_tokens=512, -) - -print(tokenizer.count_tokens("Hello world!")) -print(tokenizer.count_input_tokens_left("Hello world!")) -print(tokenizer.count_output_tokens_left("Hello world!")) +--8<-- "docs/griptape-framework/misc/src/tokenizers_5.py" ``` ### Amazon Bedrock ```python -from griptape.tokenizers import AmazonBedrockTokenizer - - -tokenizer = AmazonBedrockTokenizer(model="amazon.titan-text-express-v1") - -print(tokenizer.count_tokens("Hello world!")) -print(tokenizer.count_input_tokens_left("Hello world!")) -print(tokenizer.count_output_tokens_left("Hello world!")) +--8<-- "docs/griptape-framework/misc/src/tokenizers_6.py" ``` ### Simple Not all LLM providers have a public tokenizer API. In this case, you can use the `SimpleTokenizer` to count tokens based on a simple heuristic. 
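The heuristic is simply the character count divided by an assumed characters-per-token ratio. As a rough sketch of that idea (the exact rounding used by `SimpleTokenizer` may differ):

```python
# Rough sketch of a characters-per-token heuristic; griptape's SimpleTokenizer
# follows the same idea, though its exact rounding may differ.
def approximate_token_count(text: str, characters_per_token: int = 6) -> int:
    # Assume roughly one token for every `characters_per_token` characters.
    return (len(text) + characters_per_token - 1) // characters_per_token


print(approximate_token_count("Hello world!"))  # 12 characters -> about 2 tokens
```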
```python -from griptape.tokenizers import SimpleTokenizer - -tokenizer = SimpleTokenizer(max_input_tokens=1024, max_output_tokens=1024, characters_per_token=6) - -print(tokenizer.count_tokens("Hello world!")) -print(tokenizer.count_input_tokens_left("Hello world!")) -print(tokenizer.count_output_tokens_left("Hello world!")) +--8<-- "docs/griptape-framework/misc/src/tokenizers_7.py" ``` diff --git a/griptape/tools/email_client/__init__.py b/docs/griptape-framework/src/__init__.py similarity index 100% rename from griptape/tools/email_client/__init__.py rename to docs/griptape-framework/src/__init__.py diff --git a/docs/griptape-framework/src/index_1.py b/docs/griptape-framework/src/index_1.py new file mode 100644 index 000000000..1255b0b5b --- /dev/null +++ b/docs/griptape-framework/src/index_1.py @@ -0,0 +1,5 @@ +from griptape.structures import Agent +from griptape.utils import Chat + +agent = Agent() +Chat(agent).start() diff --git a/docs/griptape-framework/src/index_2.py b/docs/griptape-framework/src/index_2.py new file mode 100644 index 000000000..70172f598 --- /dev/null +++ b/docs/griptape-framework/src/index_2.py @@ -0,0 +1,4 @@ +from griptape.structures import Agent + +agent = Agent() +agent.run("write me a haiku about griptape") diff --git a/docs/griptape-framework/src/index_3.py b/docs/griptape-framework/src/index_3.py new file mode 100644 index 000000000..ac153b15f --- /dev/null +++ b/docs/griptape-framework/src/index_3.py @@ -0,0 +1,9 @@ +from griptape.structures import Agent +from griptape.tools import CalculatorTool + +calculator = CalculatorTool() + +agent = Agent(tools=[calculator]) + +agent.run("what is 7^12") +print("Answer:", agent.output) diff --git a/docs/griptape-framework/src/index_4.py b/docs/griptape-framework/src/index_4.py new file mode 100644 index 000000000..50465f99e --- /dev/null +++ b/docs/griptape-framework/src/index_4.py @@ -0,0 +1,20 @@ +from griptape.memory.structure import ConversationMemory +from griptape.structures import Pipeline +from griptape.tasks import PromptTask, ToolkitTask +from griptape.tools import FileManagerTool, PromptSummaryTool, WebScraperTool + +# Pipelines represent sequences of tasks. +pipeline = Pipeline(conversation_memory=ConversationMemory()) + +pipeline.add_tasks( + # Load up the first argument from `pipeline.run`. + ToolkitTask( + "{{ args[0] }}", + # Add tools for web scraping, and file management + tools=[WebScraperTool(off_prompt=True), FileManagerTool(off_prompt=True), PromptSummaryTool(off_prompt=False)], + ), + # Augment `input` from the previous task. + PromptTask("Say the following in spanish: {{ parent_output }}"), +) + +pipeline.run("Load https://www.griptape.ai, summarize it, and store it in griptape.txt") diff --git a/docs/griptape-framework/structures/agents.md b/docs/griptape-framework/structures/agents.md index 8f6a6edb0..1b40fad2b 100644 --- a/docs/griptape-framework/structures/agents.md +++ b/docs/griptape-framework/structures/agents.md @@ -12,22 +12,12 @@ directly, which the agent uses to dynamically determine whether to use a [Prompt If [tools](../../reference/griptape/structures/agent.md#griptape.structures.agent.Agent.tools) are passed provided to the Agent, a [Toolkit Task](./tasks.md#toolkit-task) will be used. If no [tools](../../reference/griptape/structures/agent.md#griptape.structures.agent.Agent.tools) are provided, a [Prompt Task](./tasks.md#prompt-task) will be used. 
-You can access the final output of the Agent by using the [output](../../reference/griptape/structures/agent.md#griptape.structures.structure.Structure.output) attribute. +You can access the final output of the Agent by using the [output](../../reference/griptape/structures/structure.md#griptape.structures.structure.Structure.output) attribute. ## Toolkit Task Agent ```python -from griptape.tools import Calculator -from griptape.structures import Agent - - -agent = Agent( - input="Calculate the following: {{ args[0] }}", - tools=[Calculator()] -) - -agent.run("what's 13^7?") -print("Answer:", agent.output) +--8<-- "docs/griptape-framework/structures/src/agents_1.py" ``` ``` @@ -37,7 +27,7 @@ print("Answer:", agent.output) Actions: [ { "tag": "call_ZSCH6vNoycOgtPJH2DL2U9ji", - "name": "Calculator", + "name": "CalculatorTool", "path": "calculate", "input": { "values": { @@ -56,21 +46,7 @@ Answer: 62,748,517 ## Prompt Task Agent ```python -from griptape.structures import Agent -from griptape.tasks import PromptTask - - -agent = Agent() -agent.add_task( - PromptTask( - "Write me a {{ creative_medium }} about {{ args[0] }} and {{ args[1] }}", - context={ - 'creative_medium': 'haiku' - } - ) -) - -agent.run("Skateboards", "Programming") +--8<-- "docs/griptape-framework/structures/src/agents_2.py" ``` ``` diff --git a/docs/griptape-framework/structures/config.md b/docs/griptape-framework/structures/config.md deleted file mode 100644 index 3f510eb86..000000000 --- a/docs/griptape-framework/structures/config.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -search: - boost: 2 ---- - -## Overview - -The [StructureConfig](../../reference/griptape/config/structure_config.md) class allows for the customization of Structures within Griptape, enabling specific settings such as Drivers to be defined for Tasks. - -### Premade Configs - -Griptape provides predefined [StructureConfig](../../reference/griptape/config/structure_config.md)'s for widely used services that provide APIs for most Driver types Griptape offers. - -#### OpenAI - -The [OpenAI Structure Config](../../reference/griptape/config/openai_structure_config.md) provides default Drivers for OpenAI's APIs. This is the default config for all Structures. - - -```python -from griptape.structures import Agent -from griptape.config import OpenAiStructureConfig - -agent = Agent( - config=OpenAiStructureConfig() -) - -agent = Agent() # This is equivalent to the above -``` - -#### Azure OpenAI - -The [Azure OpenAI Structure Config](../../reference/griptape/config/azure_openai_structure_config.md) provides default Drivers for Azure's OpenAI APIs. - - -```python -import os -from griptape.structures import Agent -from griptape.config import AzureOpenAiStructureConfig - -agent = Agent( - config=AzureOpenAiStructureConfig( - azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT_3"], - api_key=os.environ["AZURE_OPENAI_API_KEY_3"] - ).merge_config({ - "image_query_driver": { - "azure_deployment": "gpt-4o", - }, - }), -) -``` - -#### Amazon Bedrock -The [Amazon Bedrock Structure Config](../../reference/griptape/config/amazon_bedrock_structure_config.md) provides default Drivers for Amazon Bedrock's APIs. 
- -```python -import os -import boto3 -from griptape.structures import Agent -from griptape.config import AmazonBedrockStructureConfig - -agent = Agent( - config=AmazonBedrockStructureConfig( - session=boto3.Session( - region_name=os.environ["AWS_DEFAULT_REGION"], - aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"], - aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"], - ) - ) -) -``` - -#### Google -The [Google Structure Config](../../reference/griptape/config/google_structure_config.md) provides default Drivers for Google's Gemini APIs. - -```python -from griptape.structures import Agent -from griptape.config import GoogleStructureConfig - -agent = Agent( - config=GoogleStructureConfig() -) -``` - -#### Anthropic - -The [Anthropic Structure Config](../../reference/griptape/config/anthropic_structure_config.md) provides default Drivers for Anthropic's APIs. - -!!! info - Anthropic does not provide an embeddings API which means you will need to use another service for embeddings. - The `AnthropicStructureConfig` defaults to using `VoyageAiEmbeddingDriver` which integrates with [VoyageAI](https://www.voyageai.com/), the service used in Anthropic's [embeddings documentation](https://docs.anthropic.com/claude/docs/embeddings). - To override the default embedding driver, see: [Override Default Structure Embedding Driver](../drivers/embedding-drivers.md#override-default-structure-embedding-driver). - - -```python -from griptape.structures import Agent -from griptape.config import AnthropicStructureConfig - -agent = Agent( - config=AnthropicStructureConfig() -) -``` - -#### Cohere - -The [Cohere Structure Config](../../reference/griptape/config/cohere_structure_config.md) provides default Drivers for Cohere's APIs. - - -```python -import os -from griptape.config import CohereStructureConfig -from griptape.structures import Agent - -agent = Agent(config=CohereStructureConfig(api_key=os.environ["COHERE_API_KEY"])) -``` - -### Custom Configs - -You can create your own [StructureConfig](../../reference/griptape/config/structure_config.md) by overriding relevant Drivers. -The [StructureConfig](../../reference/griptape/config/structure_config.md) class includes "Dummy" Drivers for all types, which throw a [DummyError](../../reference/griptape/exceptions/dummy_exception.md) if invoked without being overridden. -This approach ensures that you are informed through clear error messages if you attempt to use Structures without proper Driver configurations. - -```python -import os -from griptape.structures import Agent -from griptape.config import StructureConfig -from griptape.drivers import AnthropicPromptDriver - -agent = Agent( - config=StructureConfig( - prompt_driver=AnthropicPromptDriver( - model="claude-3-sonnet-20240229", - api_key=os.environ["ANTHROPIC_API_KEY"], - ) - ), -) -``` - -### Loading/Saving Configs - -Configuration classes in Griptape offer utility methods for loading, saving, and merging configurations, streamlining the management of complex setups. 
- -```python -from griptape.structures import Agent -from griptape.config import AmazonBedrockStructureConfig -from griptape.drivers import AmazonBedrockCohereEmbeddingDriver - -custom_config = AmazonBedrockStructureConfig() -custom_config.embedding_driver = AmazonBedrockCohereEmbeddingDriver() -custom_config.merge_config( - { - "embedding_driver": { - "base_url": None, - "model": "text-embedding-3-small", - "organization": None, - "type": "OpenAiEmbeddingDriver", - }, - } -) -serialized_config = custom_config.to_json() -deserialized_config = AmazonBedrockStructureConfig.from_json(serialized_config) - -agent = Agent( - config=deserialized_config.merge_config({ - "prompt_driver" : { - "model": "anthropic.claude-3-sonnet-20240229-v1:0", - }, - }), -) -``` - diff --git a/docs/griptape-framework/structures/configs.md b/docs/griptape-framework/structures/configs.md new file mode 100644 index 000000000..2a9b5c62d --- /dev/null +++ b/docs/griptape-framework/structures/configs.md @@ -0,0 +1,89 @@ +--- +search: + boost: 2 +--- + +## Overview + +Griptape exposes global configuration options to easily customize different parts of the framework. + +### Drivers Configs + +The [DriversConfig](../../reference/griptape/configs/drivers/drivers_config.md) class allows for the customization of Structures within Griptape, enabling specific settings such as Drivers to be defined for Tasks. + +Griptape provides predefined [DriversConfig](../../reference/griptape/configs/drivers/drivers_config.md)'s for widely used services that provide APIs for most Driver types Griptape offers. + +#### OpenAI + +The [OpenAI Driver config](../../reference/griptape/configs/drivers/openai_drivers_config.md) provides default Drivers for OpenAI's APIs. This is the default config for all Structures. + +```python +--8<-- "docs/griptape-framework/structures/src/drivers_config_1.py" +``` + +#### Azure OpenAI + +The [Azure OpenAI Driver config](../../reference/griptape/configs/drivers/azure_openai_drivers_config.md) provides default Drivers for Azure's OpenAI APIs. + +```python +--8<-- "docs/griptape-framework/structures/src/drivers_config_2.py" +``` + +#### Amazon Bedrock +The [Amazon Bedrock Driver config](../../reference/griptape/configs/drivers/amazon_bedrock_drivers_config.md) provides default Drivers for Amazon Bedrock's APIs. + +```python +--8<-- "docs/griptape-framework/structures/src/drivers_config_3.py" +``` + +#### Google +The [Google Driver config](../../reference/griptape/configs/drivers/google_drivers_config.md) provides default Drivers for Google's Gemini APIs. + +```python +--8<-- "docs/griptape-framework/structures/src/drivers_config_4.py" +``` + +#### Anthropic + +The [Anthropic Driver config](../../reference/griptape/configs/drivers/anthropic_drivers_config.md) provides default Drivers for Anthropic's APIs. + +!!! info + Anthropic does not provide an embeddings API which means you will need to use another service for embeddings. + The `AnthropicDriversConfig` defaults to using `VoyageAiEmbeddingDriver` which integrates with [VoyageAI](https://www.voyageai.com/), the service used in Anthropic's [embeddings documentation](https://docs.anthropic.com/claude/docs/embeddings). + To override the default embedding driver, see: [Override Default Structure Embedding Driver](../drivers/embedding-drivers.md#override-default-structure-embedding-driver). 
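If you would rather not rely on the VoyageAI default, one way to swap the embedding service is to pass a different embedding Driver when building the config. This is only a minimal sketch, assuming `embedding_driver` is accepted as a constructor argument like the other `DriversConfig` fields shown in this patch; the `OpenAiEmbeddingDriver` here is just an example stand-in for whichever embedding service you actually use.

```python
from griptape.configs import Defaults
from griptape.configs.drivers import AnthropicDriversConfig
from griptape.drivers import OpenAiEmbeddingDriver
from griptape.structures import Agent

# Assumption: `embedding_driver` can be overridden on the config like the other
# DriversConfig fields (see drivers_config_8.py, which edits the same field).
Defaults.drivers_config = AnthropicDriversConfig(
    embedding_driver=OpenAiEmbeddingDriver(),
)

agent = Agent()
```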
+ +```python +--8<-- "docs/griptape-framework/structures/src/drivers_config_5.py" +``` + +#### Cohere + +The [Cohere Driver config](../../reference/griptape/configs/drivers/cohere_drivers_config.md) provides default Drivers for Cohere's APIs. + +```python +--8<-- "docs/griptape-framework/structures/src/drivers_config_6.py" +``` + +#### Custom + +You can create your own [DriversConfig](../../reference/griptape/configs/drivers/drivers_config.md) by overriding relevant Drivers. +The [DriversConfig](../../reference/griptape/configs/drivers/drivers_config.md) class includes "Dummy" Drivers for all types, which throw a [DummyError](../../reference/griptape/exceptions/dummy_exception.md) if invoked without being overridden. +This approach ensures that you are informed through clear error messages if you attempt to use Structures without proper Driver configurations. + +```python +--8<-- "docs/griptape-framework/structures/src/drivers_config_7.py" +``` + +### Logging Config + +Griptape provides a predefined [LoggingConfig](../../reference/griptape/configs/logging/logging_config.md)'s for easily customizing the logging events that the framework emits. In order to customize the logger, the logger can be fetched by using the `Defaults.logging.logger_name`. + +```python +--8<-- "docs/griptape-framework/structures/src/logging_config.py" +``` + +### Loading/Saving Configs + +```python +--8<-- "docs/griptape-framework/structures/src/drivers_config_8.py" +``` diff --git a/docs/griptape-framework/structures/conversation-memory.md b/docs/griptape-framework/structures/conversation-memory.md index 19d79c702..503a00b14 100644 --- a/docs/griptape-framework/structures/conversation-memory.md +++ b/docs/griptape-framework/structures/conversation-memory.md @@ -10,13 +10,7 @@ You can use Conversation Memory to give Griptape Structures the ability to keep ### Example ```python -from griptape.structures import Agent -from griptape.memory.structure import ConversationMemory - -agent = Agent() - -agent.run("My favorite animal is a Liger.") -agent.run("What is my favorite animal?") +--8<-- "docs/griptape-framework/structures/src/conversation_memory_1.py" ``` ``` @@ -35,10 +29,7 @@ agent.run("What is my favorite animal?") You can disable conversation memory in any structure by setting it to `None`: ```python -from griptape.structures import Agent -from griptape.memory.structure import ConversationMemory - -Agent(conversation_memory=None) +--8<-- "docs/griptape-framework/structures/src/conversation_memory_2.py" ``` ## Types of Memory @@ -50,36 +41,13 @@ Griptape provides several types of Conversation Memory to fit various use-cases. [ConversationMemory](../../reference/griptape/memory/structure/conversation_memory.md) will keep track of the full task input and output for all runs. ```python -from griptape.structures import Agent -from griptape.memory.structure import ConversationMemory - -agent = Agent( - conversation_memory=ConversationMemory() -) - -agent.run("Hello!") - -print(agent.conversation_memory) +--8<-- "docs/griptape-framework/structures/src/conversation_memory_3.py" ``` You can set the [max_runs](../../reference/griptape/memory/structure/base_conversation_memory.md#griptape.memory.structure.base_conversation_memory.BaseConversationMemory.max_runs) parameter to limit how many runs are kept in memory. 
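Besides printing the memory object as in the example below, you can also walk the stored runs directly. A small sketch, reusing the `runs`, `input`, and `output` attributes that the `max_runs` example relies on:

```python
from griptape.memory.structure import ConversationMemory
from griptape.structures import Agent

agent = Agent(conversation_memory=ConversationMemory())

agent.run("My favorite color is green.")
agent.run("What is my favorite color?")

# Each run keeps the full task input and output for that exchange.
for run in agent.conversation_memory.runs:
    print(run.input, "->", run.output)
```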
```python -from griptape.structures import Agent -from griptape.memory.structure import ConversationMemory - -agent = Agent( - conversation_memory=ConversationMemory(max_runs=2) -) - -agent.run("Run 1") -agent.run("Run 2") -agent.run("Run 3") -agent.run("Run 4") -agent.run("Run 5") - -print(agent.conversation_memory.runs[0].input == 'run4') -print(agent.conversation_memory.runs[1].input == 'run5') +--8<-- "docs/griptape-framework/structures/src/conversation_memory_4.py" ``` ### Summary Conversation Memory @@ -90,15 +58,6 @@ You can choose to offset which runs are summarized with the [offset](../../reference/griptape/memory/structure/summary_conversation_memory.md#griptape.memory.structure.summary_conversation_memory.SummaryConversationMemory.offset) parameter. ```python -from griptape.structures import Agent -from griptape.memory.structure import SummaryConversationMemory - -agent = Agent( - conversation_memory=SummaryConversationMemory(offset=2) -) - -agent.run("Hello!") - -print(agent.conversation_memory.summary) +--8<-- "docs/griptape-framework/structures/src/conversation_memory_5.py" ``` diff --git a/docs/griptape-framework/structures/observability.md b/docs/griptape-framework/structures/observability.md index 69af849e6..01e1af336 100644 --- a/docs/griptape-framework/structures/observability.md +++ b/docs/griptape-framework/structures/observability.md @@ -9,17 +9,8 @@ The [Observability](../../reference/griptape/observability/observability.md) con Observability is completely optional. To opt in, wrap your application code with the [Observability](../../reference/griptape/observability/observability.md) context manager, for example: -```python title="PYTEST_IGNORE" -from griptape.drivers import GriptapeCloudObservabilityDriver -from griptape.structures import Agent -from griptape.observability import Observability - -observability_driver = GriptapeCloudObservabilityDriver() - -with Observability(observability_driver=observability_driver): - # Important! Only code within this block is subject to observability - agent = Agent() - agent.run("Name the five greatest rappers of all time") +```python +--8<-- "docs/griptape-framework/structures/src/observability_1.py" ``` !!! info @@ -31,32 +22,6 @@ All functions and methods annotated with the `@observable` decorator will be tra For example: -```python title="PYTEST_IGNORE" -import time -from griptape.drivers import GriptapeCloudObservabilityDriver -from griptape.rules import Rule -from griptape.structures import Agent -from griptape.observability import Observability -from griptape.common import observable - -# Decorate a function -@observable -def my_function(): - time.sleep(3) - -class MyClass: - # Decorate a method - @observable - def my_method(self): - time.sleep(1) - my_function() - time.sleep(2) - -observability_driver = GriptapeCloudObservabilityDriver() - -# When invoking the instrumented code from within the Observability context manager, the -# telemetry for the custom code will be sent to the destination specified by the driver. 
-with Observability(observability_driver=observability_driver): - my_function() - MyClass().my_method() +```python +--8<-- "docs/griptape-framework/structures/src/observability_2.py" ``` diff --git a/docs/griptape-framework/structures/pipelines.md b/docs/griptape-framework/structures/pipelines.md index ad652f6c0..7bcfc1348 100644 --- a/docs/griptape-framework/structures/pipelines.md +++ b/docs/griptape-framework/structures/pipelines.md @@ -6,7 +6,7 @@ search: ## Overview A [Pipeline](../../reference/griptape/structures/pipeline.md) is very similar to an [Agent](../../reference/griptape/structures/agent.md), but allows for multiple tasks. -You can access the final output of the Pipeline by using the [output](../../reference/griptape/structures/agent.md#griptape.structures.structure.Structure.output) attribute. +You can access the final output of the Pipeline by using the [output](../../reference/griptape/structures/structure.md#griptape.structures.structure.Structure.output) attribute. ## Context @@ -20,20 +20,7 @@ Pipelines have access to the following [context](../../reference/griptape/struct ## Pipeline ```python -from griptape.tasks import PromptTask -from griptape.structures import Pipeline - - -pipeline = Pipeline() - -pipeline.add_tasks( - # take the first argument from the pipeline `run` method - PromptTask("{{ args[0] }}"), - # take the output from the previous task and insert it into the prompt - PromptTask("Say the following like a pirate: {{ parent_output }}") -) - -pipeline.run("Write me a haiku about sailing.") +--8<-- "docs/griptape-framework/structures/src/pipelines_1.py" ``` ``` diff --git a/docs/griptape-framework/structures/rulesets.md b/docs/griptape-framework/structures/rulesets.md index 324973b71..d69b085ac 100644 --- a/docs/griptape-framework/structures/rulesets.md +++ b/docs/griptape-framework/structures/rulesets.md @@ -15,38 +15,7 @@ Rulesets can be used to shape personality, format output, restrict topics, and m You can define a Ruleset at the Structure level if you need to have certain behaviors across all Tasks. ```python -from griptape.structures import Pipeline -from griptape.tasks import PromptTask -from griptape.rules import Rule, Ruleset - -pipeline = Pipeline( - rulesets=[ - Ruleset( - name="Employment", - rules=[ - Rule("Behave like a polite customer support agent"), - Rule("Act like you work for company SkaterWorld, Inc."), - Rule("Discuss only topics related to skateboarding"), - Rule("Limit your response to fewer than 5 sentences."), - ], - ), - Ruleset( - name="Background", - rules=[ - Rule("Your name is Todd"), - ], - ), - ] -) - -pipeline.add_tasks( - PromptTask(input="Respond to this user's question: {{ args[0] }}"), - PromptTask( - input="Extract keywords from this response: {{ parent_output }}" - ), -) - -pipeline.run("How do I do a kickflip?") +--8<-- "docs/griptape-framework/structures/src/rulesets_1.py" ``` ``` @@ -73,24 +42,7 @@ pipeline.run("How do I do a kickflip?") You can pass [rules](../../reference/griptape/structures/structure.md#griptape.structures.structure.Structure.rules) directly to the Structure to have a Ruleset created for you. ```python -from griptape.structures import Pipeline -from griptape.tasks import PromptTask -from griptape.rules import Rule - -pipeline = Pipeline( - rules=[ - Rule("Respond only using emojis"), - ], -) - -pipeline.add_tasks( - PromptTask("Respond to this question from the user: '{{ args[0] }}'"), - PromptTask( - "How would you rate your response (1-5)? 1 being bad, 5 being good. 
Response: '{{parent_output}}'" - ), -), - -pipeline.run("How do I bake a cake?") +--8<-- "docs/griptape-framework/structures/src/rulesets_2.py" ``` ``` [09/29/23 13:31:41] INFO PromptTask 51c0030b7a854ae5a9bef4595014915c @@ -111,38 +63,7 @@ pipeline.run("How do I bake a cake?") You can define a Ruleset at the Task level if you need to have different behaviors per Task. ```python -from griptape.structures import Pipeline -from griptape.tasks import PromptTask -from griptape.rules import Rule, Ruleset - -pipeline = Pipeline() - -pipeline.add_tasks( - PromptTask( - input="Respond to the following prompt: {{ args[0] }}", - rulesets=[ - Ruleset( - name="Emojis", - rules=[ - Rule("Respond using uppercase characters only."), - ], - ) - ] - ), - PromptTask( - input="Determine the sentiment of the following text: {{ parent_output }}", - rulesets=[ - Ruleset( - name="Diacritic", - rules=[ - Rule("Respond using diacritic characters only."), - ], - ) - ], - ), -) - -pipeline.run("I love skateboarding!") +--8<-- "docs/griptape-framework/structures/src/rulesets_3.py" ``` ``` @@ -162,23 +83,7 @@ pipeline.run("I love skateboarding!") You can pass [rules](../../reference/griptape/mixins/rule_mixin.md#griptape.mixins.rule_mixin.RuleMixin.rules) directly to the Task to have a Ruleset created for you. ```python -from griptape.structures import Pipeline -from griptape.tasks import PromptTask -from griptape.rules import Rule - -pipeline = Pipeline() - -pipeline.add_tasks( - PromptTask( - rules=[ - Rule("Write your answer in json with a single key 'emoji_response'"), - Rule("Respond only using emojis"), - ], - ), -) - -pipeline.run("How are you?") - +--8<-- "docs/griptape-framework/structures/src/rulesets_4.py" ``` ``` [09/25/23 16:29:05] INFO PromptTask d1cc2c0b780d4b32b6309ceab11173f4 diff --git a/griptape/tools/google_cal/__init__.py b/docs/griptape-framework/structures/src/__init__.py similarity index 100% rename from griptape/tools/google_cal/__init__.py rename to docs/griptape-framework/structures/src/__init__.py diff --git a/docs/griptape-framework/structures/src/agents_1.py b/docs/griptape-framework/structures/src/agents_1.py new file mode 100644 index 000000000..9ce4aec22 --- /dev/null +++ b/docs/griptape-framework/structures/src/agents_1.py @@ -0,0 +1,7 @@ +from griptape.structures import Agent +from griptape.tools import CalculatorTool + +agent = Agent(input="Calculate the following: {{ args[0] }}", tools=[CalculatorTool()]) + +agent.run("what's 13^7?") +print("Answer:", agent.output) diff --git a/docs/griptape-framework/structures/src/agents_2.py b/docs/griptape-framework/structures/src/agents_2.py new file mode 100644 index 000000000..84d431c1e --- /dev/null +++ b/docs/griptape-framework/structures/src/agents_2.py @@ -0,0 +1,11 @@ +from griptape.structures import Agent +from griptape.tasks import PromptTask + +agent = Agent() +agent.add_task( + PromptTask( + "Write me a {{ creative_medium }} about {{ args[0] }} and {{ args[1] }}", context={"creative_medium": "haiku"} + ) +) + +agent.run("Skateboards", "Programming") diff --git a/docs/griptape-framework/structures/src/conversation_memory_1.py b/docs/griptape-framework/structures/src/conversation_memory_1.py new file mode 100644 index 000000000..e44697832 --- /dev/null +++ b/docs/griptape-framework/structures/src/conversation_memory_1.py @@ -0,0 +1,6 @@ +from griptape.structures import Agent + +agent = Agent() + +agent.run("My favorite animal is a Liger.") +agent.run("What is my favorite animal?") diff --git 
a/docs/griptape-framework/structures/src/conversation_memory_2.py b/docs/griptape-framework/structures/src/conversation_memory_2.py new file mode 100644 index 000000000..490c10866 --- /dev/null +++ b/docs/griptape-framework/structures/src/conversation_memory_2.py @@ -0,0 +1,3 @@ +from griptape.structures import Agent + +Agent(conversation_memory=None) diff --git a/docs/griptape-framework/structures/src/conversation_memory_3.py b/docs/griptape-framework/structures/src/conversation_memory_3.py new file mode 100644 index 000000000..425230eb2 --- /dev/null +++ b/docs/griptape-framework/structures/src/conversation_memory_3.py @@ -0,0 +1,8 @@ +from griptape.memory.structure import ConversationMemory +from griptape.structures import Agent + +agent = Agent(conversation_memory=ConversationMemory()) + +agent.run("Hello!") + +print(agent.conversation_memory) diff --git a/docs/griptape-framework/structures/src/conversation_memory_4.py b/docs/griptape-framework/structures/src/conversation_memory_4.py new file mode 100644 index 000000000..e8a3b9861 --- /dev/null +++ b/docs/griptape-framework/structures/src/conversation_memory_4.py @@ -0,0 +1,13 @@ +from griptape.memory.structure import ConversationMemory +from griptape.structures import Agent + +agent = Agent(conversation_memory=ConversationMemory(max_runs=2)) + +agent.run("Run 1") +agent.run("Run 2") +agent.run("Run 3") +agent.run("Run 4") +agent.run("Run 5") + +print(agent.conversation_memory.runs[0].input == "run4") +print(agent.conversation_memory.runs[1].input == "run5") diff --git a/docs/griptape-framework/structures/src/conversation_memory_5.py b/docs/griptape-framework/structures/src/conversation_memory_5.py new file mode 100644 index 000000000..8c7807c58 --- /dev/null +++ b/docs/griptape-framework/structures/src/conversation_memory_5.py @@ -0,0 +1,11 @@ +from griptape.memory.structure import SummaryConversationMemory +from griptape.structures import Agent +from griptape.utils import Conversation + +agent = Agent(conversation_memory=SummaryConversationMemory(offset=2)) + +agent.run("Hello my name is John?") +agent.run("What is my name?") +agent.run("My favorite color is blue.") + +print(Conversation(agent.conversation_memory)) diff --git a/docs/griptape-framework/structures/src/drivers_config_1.py b/docs/griptape-framework/structures/src/drivers_config_1.py new file mode 100644 index 000000000..c156f8594 --- /dev/null +++ b/docs/griptape-framework/structures/src/drivers_config_1.py @@ -0,0 +1,7 @@ +from griptape.configs import Defaults +from griptape.configs.drivers import OpenAiDriversConfig +from griptape.structures import Agent + +Defaults.drivers_config = OpenAiDriversConfig() + +agent = Agent() diff --git a/docs/griptape-framework/structures/src/drivers_config_2.py b/docs/griptape-framework/structures/src/drivers_config_2.py new file mode 100644 index 000000000..b115a22f4 --- /dev/null +++ b/docs/griptape-framework/structures/src/drivers_config_2.py @@ -0,0 +1,11 @@ +import os + +from griptape.configs import Defaults +from griptape.configs.drivers import AzureOpenAiDriversConfig +from griptape.structures import Agent + +Defaults.drivers_config = AzureOpenAiDriversConfig( + azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT_3"], api_key=os.environ["AZURE_OPENAI_API_KEY_3"] +) + +agent = Agent() diff --git a/docs/griptape-framework/structures/src/drivers_config_3.py b/docs/griptape-framework/structures/src/drivers_config_3.py new file mode 100644 index 000000000..0af0423de --- /dev/null +++ 
b/docs/griptape-framework/structures/src/drivers_config_3.py @@ -0,0 +1,17 @@ +import os + +import boto3 + +from griptape.configs import Defaults +from griptape.configs.drivers import AmazonBedrockDriversConfig +from griptape.structures import Agent + +Defaults.drivers_config = AmazonBedrockDriversConfig( + session=boto3.Session( + region_name=os.environ["AWS_DEFAULT_REGION"], + aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"], + aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"], + ) +) + +agent = Agent() diff --git a/docs/griptape-framework/structures/src/drivers_config_4.py b/docs/griptape-framework/structures/src/drivers_config_4.py new file mode 100644 index 000000000..f9cfb6d16 --- /dev/null +++ b/docs/griptape-framework/structures/src/drivers_config_4.py @@ -0,0 +1,7 @@ +from griptape.configs import Defaults +from griptape.configs.drivers import GoogleDriversConfig +from griptape.structures import Agent + +Defaults.drivers_config = GoogleDriversConfig() + +agent = Agent() diff --git a/docs/griptape-framework/structures/src/drivers_config_5.py b/docs/griptape-framework/structures/src/drivers_config_5.py new file mode 100644 index 000000000..fb2aa8eee --- /dev/null +++ b/docs/griptape-framework/structures/src/drivers_config_5.py @@ -0,0 +1,7 @@ +from griptape.configs import Defaults +from griptape.configs.drivers import AnthropicDriversConfig +from griptape.structures import Agent + +Defaults.drivers_config = AnthropicDriversConfig() + +agent = Agent() diff --git a/docs/griptape-framework/structures/src/drivers_config_6.py b/docs/griptape-framework/structures/src/drivers_config_6.py new file mode 100644 index 000000000..eaa8e3d71 --- /dev/null +++ b/docs/griptape-framework/structures/src/drivers_config_6.py @@ -0,0 +1,9 @@ +import os + +from griptape.configs import Defaults +from griptape.configs.drivers import CohereDriversConfig +from griptape.structures import Agent + +Defaults.drivers_config = CohereDriversConfig(api_key=os.environ["COHERE_API_KEY"]) + +agent = Agent() diff --git a/docs/griptape-framework/structures/src/drivers_config_7.py b/docs/griptape-framework/structures/src/drivers_config_7.py new file mode 100644 index 000000000..3b1d396ce --- /dev/null +++ b/docs/griptape-framework/structures/src/drivers_config_7.py @@ -0,0 +1,16 @@ +import os + +from griptape.configs import Defaults +from griptape.configs.drivers import DriversConfig +from griptape.drivers import AnthropicPromptDriver +from griptape.structures import Agent + +Defaults.drivers_config = DriversConfig( + prompt_driver=AnthropicPromptDriver( + model="claude-3-sonnet-20240229", + api_key=os.environ["ANTHROPIC_API_KEY"], + ) +) + + +agent = Agent() diff --git a/docs/griptape-framework/structures/src/drivers_config_8.py b/docs/griptape-framework/structures/src/drivers_config_8.py new file mode 100644 index 000000000..f34a6d0b1 --- /dev/null +++ b/docs/griptape-framework/structures/src/drivers_config_8.py @@ -0,0 +1,18 @@ +from griptape.configs import Defaults +from griptape.configs.drivers import AmazonBedrockDriversConfig +from griptape.structures import Agent + +custom_config = AmazonBedrockDriversConfig() +dict_config = custom_config.to_dict() +# Use OpenAi for embeddings +dict_config["embedding_driver"] = { + "base_url": None, + "model": "text-embedding-3-small", + "organization": None, + "type": "OpenAiEmbeddingDriver", +} +custom_config = AmazonBedrockDriversConfig.from_dict(dict_config) + +Defaults.drivers_config = custom_config + +agent = Agent() diff --git 
a/docs/griptape-framework/structures/src/logging_config.py b/docs/griptape-framework/structures/src/logging_config.py new file mode 100644 index 000000000..b220e2478 --- /dev/null +++ b/docs/griptape-framework/structures/src/logging_config.py @@ -0,0 +1,14 @@ +import logging + +from griptape.configs import Defaults +from griptape.configs.drivers import OpenAiDriversConfig +from griptape.configs.logging import TruncateLoggingFilter +from griptape.structures import Agent + +Defaults.drivers_config = OpenAiDriversConfig() + +logger = logging.getLogger(Defaults.logging_config.logger_name) +logger.setLevel(logging.ERROR) +logger.addFilter(TruncateLoggingFilter(max_log_length=100)) + +agent = Agent() diff --git a/docs/griptape-framework/structures/src/observability_1.py b/docs/griptape-framework/structures/src/observability_1.py new file mode 100644 index 000000000..e0680e04c --- /dev/null +++ b/docs/griptape-framework/structures/src/observability_1.py @@ -0,0 +1,10 @@ +from griptape.drivers import GriptapeCloudObservabilityDriver +from griptape.observability import Observability +from griptape.structures import Agent + +observability_driver = GriptapeCloudObservabilityDriver() + +with Observability(observability_driver=observability_driver): + # Important! Only code within this block is subject to observability + agent = Agent() + agent.run("Name the five greatest rappers of all time") diff --git a/docs/griptape-framework/structures/src/observability_2.py b/docs/griptape-framework/structures/src/observability_2.py new file mode 100644 index 000000000..1cc6c1244 --- /dev/null +++ b/docs/griptape-framework/structures/src/observability_2.py @@ -0,0 +1,29 @@ +import time + +from griptape.common import observable +from griptape.drivers import GriptapeCloudObservabilityDriver +from griptape.observability import Observability + + +# Decorate a function +@observable +def my_function() -> None: + time.sleep(3) + + +class MyClass: + # Decorate a method + @observable + def my_method(self) -> None: + time.sleep(1) + my_function() + time.sleep(2) + + +observability_driver = GriptapeCloudObservabilityDriver() + +# When invoking the instrumented code from within the Observability context manager, the +# telemetry for the custom code will be sent to the destination specified by the driver. 
+with Observability(observability_driver=observability_driver): + my_function() + MyClass().my_method() diff --git a/docs/griptape-framework/structures/src/pipelines_1.py b/docs/griptape-framework/structures/src/pipelines_1.py new file mode 100644 index 000000000..f2b8c6f6f --- /dev/null +++ b/docs/griptape-framework/structures/src/pipelines_1.py @@ -0,0 +1,13 @@ +from griptape.structures import Pipeline +from griptape.tasks import PromptTask + +pipeline = Pipeline() + +pipeline.add_tasks( + # take the first argument from the pipeline `run` method + PromptTask("{{ args[0] }}"), + # take the output from the previous task and insert it into the prompt + PromptTask("Say the following like a pirate: {{ parent_output }}"), +) + +pipeline.run("Write me a haiku about sailing.") diff --git a/docs/griptape-framework/structures/src/rulesets_1.py b/docs/griptape-framework/structures/src/rulesets_1.py new file mode 100644 index 000000000..173e3f029 --- /dev/null +++ b/docs/griptape-framework/structures/src/rulesets_1.py @@ -0,0 +1,30 @@ +from griptape.rules import Rule, Ruleset +from griptape.structures import Pipeline +from griptape.tasks import PromptTask + +pipeline = Pipeline( + rulesets=[ + Ruleset( + name="Employment", + rules=[ + Rule("Behave like a polite customer support agent"), + Rule("Act like you work for company SkaterWorld, Inc."), + Rule("Discuss only topics related to skateboarding"), + Rule("Limit your response to fewer than 5 sentences."), + ], + ), + Ruleset( + name="Background", + rules=[ + Rule("Your name is Todd"), + ], + ), + ] +) + +pipeline.add_tasks( + PromptTask(input="Respond to this user's question: {{ args[0] }}"), + PromptTask(input="Extract keywords from this response: {{ parent_output }}"), +) + +pipeline.run("How do I do a kickflip?") diff --git a/docs/griptape-framework/structures/src/rulesets_2.py b/docs/griptape-framework/structures/src/rulesets_2.py new file mode 100644 index 000000000..dc1b98d31 --- /dev/null +++ b/docs/griptape-framework/structures/src/rulesets_2.py @@ -0,0 +1,16 @@ +from griptape.rules import Rule +from griptape.structures import Pipeline +from griptape.tasks import PromptTask + +pipeline = Pipeline( + rules=[ + Rule("Respond only using emojis"), + ], +) + +pipeline.add_tasks( + PromptTask("Respond to this question from the user: '{{ args[0] }}'"), + PromptTask("How would you rate your response (1-5)? 1 being bad, 5 being good. 
Response: '{{parent_output}}'"), +) + +pipeline.run("How do I bake a cake?") diff --git a/docs/griptape-framework/structures/src/rulesets_3.py b/docs/griptape-framework/structures/src/rulesets_3.py new file mode 100644 index 000000000..04769dac9 --- /dev/null +++ b/docs/griptape-framework/structures/src/rulesets_3.py @@ -0,0 +1,32 @@ +from griptape.rules import Rule, Ruleset +from griptape.structures import Pipeline +from griptape.tasks import PromptTask + +pipeline = Pipeline() + +pipeline.add_tasks( + PromptTask( + input="Respond to the following prompt: {{ args[0] }}", + rulesets=[ + Ruleset( + name="Emojis", + rules=[ + Rule("Respond using uppercase characters only."), + ], + ) + ], + ), + PromptTask( + input="Determine the sentiment of the following text: {{ parent_output }}", + rulesets=[ + Ruleset( + name="Diacritic", + rules=[ + Rule("Respond using diacritic characters only."), + ], + ) + ], + ), +) + +pipeline.run("I love skateboarding!") diff --git a/docs/griptape-framework/structures/src/rulesets_4.py b/docs/griptape-framework/structures/src/rulesets_4.py new file mode 100644 index 000000000..2bcce73a1 --- /dev/null +++ b/docs/griptape-framework/structures/src/rulesets_4.py @@ -0,0 +1,16 @@ +from griptape.rules import Rule +from griptape.structures import Pipeline +from griptape.tasks import PromptTask + +pipeline = Pipeline() + +pipeline.add_tasks( + PromptTask( + rules=[ + Rule("Write your answer in json with a single key 'emoji_response'"), + Rule("Respond only using emojis"), + ], + ), +) + +pipeline.run("How are you?") diff --git a/docs/griptape-framework/structures/src/task_memory_1.py b/docs/griptape-framework/structures/src/task_memory_1.py new file mode 100644 index 000000000..e8cfbd8ac --- /dev/null +++ b/docs/griptape-framework/structures/src/task_memory_1.py @@ -0,0 +1,7 @@ +from griptape.structures import Agent +from griptape.tools import CalculatorTool + +# Create an agent with the CalculatorTool tool +agent = Agent(tools=[CalculatorTool(off_prompt=False)]) + +agent.run("What is 10 raised to the power of 5?") diff --git a/docs/griptape-framework/structures/src/task_memory_2.py b/docs/griptape-framework/structures/src/task_memory_2.py new file mode 100644 index 000000000..9ff24e1ff --- /dev/null +++ b/docs/griptape-framework/structures/src/task_memory_2.py @@ -0,0 +1,7 @@ +from griptape.structures import Agent +from griptape.tools import CalculatorTool + +# Create an agent with the CalculatorTool tool +agent = Agent(tools=[CalculatorTool(off_prompt=True)]) + +agent.run("What is 10 raised to the power of 5?") diff --git a/docs/griptape-framework/structures/src/task_memory_3.py b/docs/griptape-framework/structures/src/task_memory_3.py new file mode 100644 index 000000000..8a0e53d8c --- /dev/null +++ b/docs/griptape-framework/structures/src/task_memory_3.py @@ -0,0 +1,7 @@ +from griptape.structures import Agent +from griptape.tools import CalculatorTool, PromptSummaryTool + +# Create an agent with the Calculator tool +agent = Agent(tools=[CalculatorTool(off_prompt=True), PromptSummaryTool(off_prompt=False)]) + +agent.run("What is the square root of 12345?") diff --git a/docs/griptape-framework/structures/src/task_memory_4.py b/docs/griptape-framework/structures/src/task_memory_4.py new file mode 100644 index 000000000..cfd6d5711 --- /dev/null +++ b/docs/griptape-framework/structures/src/task_memory_4.py @@ -0,0 +1,9 @@ +from griptape.structures import Agent +from griptape.tools import WebScraperTool + +# Create an agent with the WebScraperTool tool +agent = 
Agent(tools=[WebScraperTool()]) + +agent.run( + "According to this page https://en.wikipedia.org/wiki/Elden_Ring, how many copies of Elden Ring have been sold?" +) diff --git a/docs/griptape-framework/structures/src/task_memory_5.py b/docs/griptape-framework/structures/src/task_memory_5.py new file mode 100644 index 000000000..a061118e8 --- /dev/null +++ b/docs/griptape-framework/structures/src/task_memory_5.py @@ -0,0 +1,13 @@ +from griptape.structures import Agent +from griptape.tools import QueryTool, WebScraperTool + +agent = Agent( + tools=[ + WebScraperTool(off_prompt=True), + QueryTool(off_prompt=False), + ] +) + +agent.run( + "According to this page https://en.wikipedia.org/wiki/Elden_Ring, how many copies of Elden Ring have been sold?" +) diff --git a/docs/griptape-framework/structures/src/task_memory_6.py b/docs/griptape-framework/structures/src/task_memory_6.py new file mode 100644 index 000000000..006bf8769 --- /dev/null +++ b/docs/griptape-framework/structures/src/task_memory_6.py @@ -0,0 +1,31 @@ +from griptape.configs import Defaults +from griptape.configs.drivers import OpenAiDriversConfig +from griptape.drivers import ( + AmazonBedrockPromptDriver, + LocalVectorStoreDriver, + OpenAiChatPromptDriver, + OpenAiEmbeddingDriver, +) +from griptape.structures import Agent +from griptape.tools import FileManagerTool, QueryTool, WebScraperTool + +Defaults.drivers_config = OpenAiDriversConfig( + prompt_driver=OpenAiChatPromptDriver(model="gpt-4"), +) + +vector_store_driver = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver()) + +agent = Agent( + tools=[ + WebScraperTool(off_prompt=True), + QueryTool( + off_prompt=True, + prompt_driver=AmazonBedrockPromptDriver(model="anthropic.claude-3-haiku-20240307-v1:0"), + ), + FileManagerTool(off_prompt=True), + ], +) + +agent.run( + "Use this page https://en.wikipedia.org/wiki/Elden_Ring to find how many copies of Elden Ring have been sold, and then save the result to a file." +) diff --git a/docs/griptape-framework/structures/src/task_memory_7.py b/docs/griptape-framework/structures/src/task_memory_7.py new file mode 100644 index 000000000..d2f07466f --- /dev/null +++ b/docs/griptape-framework/structures/src/task_memory_7.py @@ -0,0 +1,12 @@ +from griptape.structures import Agent +from griptape.tools import WebScraperTool + +agent = Agent( + tools=[ + WebScraperTool(off_prompt=True) # `off_prompt=True` will store the data in Task Memory + # Missing a Tool that can read from Task Memory + ] +) +agent.run( + "According to this page https://en.wikipedia.org/wiki/San_Francisco, what is the population of San Francisco?" +) diff --git a/docs/griptape-framework/structures/src/task_memory_8.py b/docs/griptape-framework/structures/src/task_memory_8.py new file mode 100644 index 000000000..ee106caec --- /dev/null +++ b/docs/griptape-framework/structures/src/task_memory_8.py @@ -0,0 +1,14 @@ +from griptape.structures import Agent +from griptape.tools import PromptSummaryTool, WebScraperTool + +agent = Agent( + tools=[ + WebScraperTool(off_prompt=True), # This tool will store the data in Task Memory + PromptSummaryTool( + off_prompt=True + ), # This tool will store the data back in Task Memory with no way to get it out + ] +) +agent.run( + "According to this page https://en.wikipedia.org/wiki/Dark_forest_hypothesis, what is the Dark Forest Hypothesis?" 
+) diff --git a/docs/griptape-framework/structures/src/task_memory_9.py b/docs/griptape-framework/structures/src/task_memory_9.py new file mode 100644 index 000000000..66bb562f0 --- /dev/null +++ b/docs/griptape-framework/structures/src/task_memory_9.py @@ -0,0 +1,9 @@ +from griptape.structures import Agent +from griptape.tools import CalculatorTool + +agent = Agent( + tools=[ + CalculatorTool() # Default value of `off_prompt=False` will return the data directly to the LLM + ] +) +agent.run("What is 10 ^ 3, 55 / 23, and 12345 * 0.5?") diff --git a/docs/griptape-framework/structures/src/tasks_1.py b/docs/griptape-framework/structures/src/tasks_1.py new file mode 100644 index 000000000..6382fe027 --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_1.py @@ -0,0 +1,12 @@ +from griptape.structures import Agent +from griptape.tasks import PromptTask + +agent = Agent() +agent.add_task( + PromptTask( + "Respond to the user's following question '{{ args[0] }}' in the language '{{preferred_language}}' and tone '{{tone}}'.", + context={"preferred_language": "ENGLISH", "tone": "PLAYFUL"}, + ) +) + +agent.run("How do I bake a cake?") diff --git a/docs/griptape-framework/structures/src/tasks_10.py b/docs/griptape-framework/structures/src/tasks_10.py new file mode 100644 index 000000000..c94fa7919 --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_10.py @@ -0,0 +1,22 @@ +from griptape.artifacts import BaseArtifact, TextArtifact +from griptape.structures import Pipeline +from griptape.tasks import CodeExecutionTask, PromptTask + + +def character_counter(task: CodeExecutionTask) -> BaseArtifact: + result = len(task.input) + # For functions that don't need to return anything, we recommend returning task.input + return TextArtifact(str(result)) + + +# Instantiate the pipeline +pipeline = Pipeline() + +pipeline.add_tasks( + # take the first argument from the pipeline `run` method + CodeExecutionTask(run_fn=character_counter), + # # take the output from the previous task and insert it into the prompt + PromptTask("{{args[0]}} using {{ parent_output }} characters"), +) + +pipeline.run("Write me a line in a poem") diff --git a/docs/griptape-framework/structures/src/tasks_11.py b/docs/griptape-framework/structures/src/tasks_11.py new file mode 100644 index 000000000..9a1f622db --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_11.py @@ -0,0 +1,30 @@ +from griptape.drivers import OpenAiImageGenerationDriver +from griptape.engines import PromptImageGenerationEngine +from griptape.structures import Pipeline +from griptape.tasks import PromptImageGenerationTask + +# Create a driver configured to use OpenAI's DALL-E 3 model. +driver = OpenAiImageGenerationDriver( + model="dall-e-3", + quality="hd", + style="natural", +) + +# Create an engine configured to use the driver. +engine = PromptImageGenerationEngine( + image_generation_driver=driver, +) + +# Instantiate a pipeline. +pipeline = Pipeline() + +# Add a PromptImageGenerationTask to the pipeline. 
+pipeline.add_tasks( + PromptImageGenerationTask( + input="{{ args[0] }}", + image_generation_engine=engine, + output_dir="images/", + ) +) + +pipeline.run("An image of a mountain on a summer day") diff --git a/docs/griptape-framework/structures/src/tasks_12.py b/docs/griptape-framework/structures/src/tasks_12.py new file mode 100644 index 000000000..917b50607 --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_12.py @@ -0,0 +1,35 @@ +from pathlib import Path + +from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver +from griptape.engines import VariationImageGenerationEngine +from griptape.loaders import ImageLoader +from griptape.structures import Pipeline +from griptape.tasks import VariationImageGenerationTask + +# Create a driver configured to use Stable Diffusion via Bedrock. +driver = AmazonBedrockImageGenerationDriver( + image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), + model="stability.stable-diffusion-xl-v0", +) + +# Create an engine configured to use the driver. +engine = VariationImageGenerationEngine( + image_generation_driver=driver, +) + +# Load input image artifact. +image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) + +# Instantiate a pipeline. +pipeline = Pipeline() + +# Add a VariationImageGenerationTask to the pipeline. +pipeline.add_task( + VariationImageGenerationTask( + input=("{{ args[0] }}", image_artifact), + image_generation_engine=engine, + output_dir="images/", + ) +) + +pipeline.run("An image of a mountain landscape on a snowy winter day") diff --git a/docs/griptape-framework/structures/src/tasks_13.py b/docs/griptape-framework/structures/src/tasks_13.py new file mode 100644 index 000000000..d2aa45983 --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_13.py @@ -0,0 +1,35 @@ +from pathlib import Path + +from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver +from griptape.engines import InpaintingImageGenerationEngine +from griptape.loaders import ImageLoader +from griptape.structures import Pipeline +from griptape.tasks import InpaintingImageGenerationTask + +# Create a driver configured to use Stable Diffusion via Bedrock. +driver = AmazonBedrockImageGenerationDriver( + image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), + model="stability.stable-diffusion-xl-v0", +) + +# Create an engine configured to use the driver. +engine = InpaintingImageGenerationEngine( + image_generation_driver=driver, +) + +# Load input image artifacts. +image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) + +mask_artifact = ImageLoader().load(Path("tests/resources/mountain-mask.png").read_bytes()) + +# Instantiate a pipeline. +pipeline = Pipeline() + +# Add an InpaintingImageGenerationTask to the pipeline. 
+pipeline.add_task( + InpaintingImageGenerationTask( + input=("{{ args[0] }}", image_artifact, mask_artifact), image_generation_engine=engine, output_dir="images/" + ) +) + +pipeline.run("An image of a castle built into the side of a mountain") diff --git a/docs/griptape-framework/structures/src/tasks_14.py b/docs/griptape-framework/structures/src/tasks_14.py new file mode 100644 index 000000000..ec489096d --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_14.py @@ -0,0 +1,37 @@ +from pathlib import Path + +from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver +from griptape.engines import OutpaintingImageGenerationEngine +from griptape.loaders import ImageLoader +from griptape.structures import Pipeline +from griptape.tasks import OutpaintingImageGenerationTask + +# Create a driver configured to use Stable Diffusion via Bedrock. +driver = AmazonBedrockImageGenerationDriver( + image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), + model="stability.stable-diffusion-xl-v0", +) + +# Create an engine configured to use the driver. +engine = OutpaintingImageGenerationEngine( + image_generation_driver=driver, +) + +# Load input image artifacts. +image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) + +mask_artifact = ImageLoader().load(Path("tests/resources/mountain-mask.png").read_bytes()) + +# Instantiate a pipeline. +pipeline = Pipeline() + +# Add an OutpaintingImageGenerationTask to the pipeline. +pipeline.add_task( + OutpaintingImageGenerationTask( + input=("{{ args[0] }}", image_artifact, mask_artifact), + image_generation_engine=engine, + output_dir="images/", + ) +) + +pipeline.run("An image of a mountain shrouded by clouds") diff --git a/docs/griptape-framework/structures/src/tasks_15.py b/docs/griptape-framework/structures/src/tasks_15.py new file mode 100644 index 000000000..0c60864f7 --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_15.py @@ -0,0 +1,34 @@ +from pathlib import Path + +from griptape.drivers import OpenAiImageQueryDriver +from griptape.engines import ImageQueryEngine +from griptape.loaders import ImageLoader +from griptape.structures import Pipeline +from griptape.tasks import ImageQueryTask + +# Create a driver configured to use OpenAI's GPT-4 Vision model. +driver = OpenAiImageQueryDriver( + model="gpt-4o", + max_tokens=100, +) + +# Create an engine configured to use the driver. +engine = ImageQueryEngine( + image_query_driver=driver, +) + +# Load the input image artifact. +image_artifact = ImageLoader().load(Path("tests/resources/mountain.png").read_bytes()) + +# Instantiate a pipeline. +pipeline = Pipeline() + +# Add an ImageQueryTask to the pipeline. 
+pipeline.add_task( + ImageQueryTask( + input=("{{ args[0] }}", [image_artifact]), + image_query_engine=engine, + ) +) + +pipeline.run("Describe the weather in the image") diff --git a/docs/griptape-framework/structures/src/tasks_16.py b/docs/griptape-framework/structures/src/tasks_16.py new file mode 100644 index 000000000..7496d2d9c --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_16.py @@ -0,0 +1,130 @@ +import os + +from griptape.drivers import GoogleWebSearchDriver, LocalStructureRunDriver +from griptape.rules import Rule, Ruleset +from griptape.structures import Agent, Pipeline +from griptape.tasks import StructureRunTask +from griptape.tools import ( + PromptSummaryTool, + WebScraperTool, + WebSearchTool, +) + + +def build_researcher() -> Agent: + return Agent( + tools=[ + WebSearchTool( + web_search_driver=GoogleWebSearchDriver( + api_key=os.environ["GOOGLE_API_KEY"], + search_id=os.environ["GOOGLE_API_SEARCH_ID"], + ), + ), + WebScraperTool( + off_prompt=True, + ), + PromptSummaryTool(off_prompt=False), + ], + rulesets=[ + Ruleset( + name="Position", + rules=[ + Rule( + value="Senior Research Analyst", + ) + ], + ), + Ruleset( + name="Objective", + rules=[ + Rule( + value="Uncover cutting-edge developments in AI and data science", + ) + ], + ), + Ruleset( + name="Background", + rules=[ + Rule( + value="""You work at a leading tech think tank., + Your expertise lies in identifying emerging trends. + You have a knack for dissecting complex data and presenting actionable insights.""" + ) + ], + ), + Ruleset( + name="Desired Outcome", + rules=[ + Rule( + value="Full analysis report in bullet points", + ) + ], + ), + ], + ) + + +def build_writer() -> Agent: + return Agent( + input="Instructions: {{args[0]}}\nContext: {{args[1]}}", + rulesets=[ + Ruleset( + name="Position", + rules=[ + Rule( + value="Tech Content Strategist", + ) + ], + ), + Ruleset( + name="Objective", + rules=[ + Rule( + value="Craft compelling content on tech advancements", + ) + ], + ), + Ruleset( + name="Backstory", + rules=[ + Rule( + value="""You are a renowned Content Strategist, known for your insightful and engaging articles. + You transform complex concepts into compelling narratives.""" + ) + ], + ), + Ruleset( + name="Desired Outcome", + rules=[ + Rule( + value="Full blog post of at least 4 paragraphs", + ) + ], + ), + ], + ) + + +team = Pipeline( + tasks=[ + StructureRunTask( + ( + """Perform a detailed examination of the newest developments in AI as of 2024. + Pinpoint major trends, breakthroughs, and their implications for various industries.""", + ), + driver=LocalStructureRunDriver(structure_factory_fn=build_researcher), + ), + StructureRunTask( + ( + """Utilize the gathered insights to craft a captivating blog + article showcasing the key AI innovations. + Ensure the content is engaging yet straightforward, appealing to a tech-aware readership. 
+ Keep the tone appealing and use simple language to make it less technical.""", + "{{parent_output}}", + ), + driver=LocalStructureRunDriver(structure_factory_fn=build_writer), + ), + ], +) + +team.run() diff --git a/docs/griptape-framework/structures/src/tasks_17.py b/docs/griptape-framework/structures/src/tasks_17.py new file mode 100644 index 000000000..e0bcae7fb --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_17.py @@ -0,0 +1,20 @@ +import os + +from griptape.drivers import ElevenLabsTextToSpeechDriver +from griptape.engines import TextToSpeechEngine +from griptape.structures import Pipeline +from griptape.tasks import TextToSpeechTask + +driver = ElevenLabsTextToSpeechDriver( + api_key=os.environ["ELEVEN_LABS_API_KEY"], + model="eleven_multilingual_v2", + voice="Matilda", +) + +task = TextToSpeechTask( + text_to_speech_engine=TextToSpeechEngine( + text_to_speech_driver=driver, + ), +) + +Pipeline(tasks=[task]).run("Generate audio from this text: 'Hello, world!'") diff --git a/docs/griptape-framework/structures/src/tasks_18.py b/docs/griptape-framework/structures/src/tasks_18.py new file mode 100644 index 000000000..08ece5a92 --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_18.py @@ -0,0 +1,17 @@ +from griptape.drivers import OpenAiAudioTranscriptionDriver +from griptape.engines import AudioTranscriptionEngine +from griptape.loaders import AudioLoader +from griptape.structures import Pipeline +from griptape.tasks import AudioTranscriptionTask +from griptape.utils import load_file + +driver = OpenAiAudioTranscriptionDriver(model="whisper-1") + +task = AudioTranscriptionTask( + input=lambda _: AudioLoader().load(load_file("tests/resources/sentences2.wav")), + audio_transcription_engine=AudioTranscriptionEngine( + audio_transcription_driver=driver, + ), +) + +Pipeline(tasks=[task]).run() diff --git a/docs/griptape-framework/structures/src/tasks_2.py b/docs/griptape-framework/structures/src/tasks_2.py new file mode 100644 index 000000000..0f5f152f6 --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_2.py @@ -0,0 +1,10 @@ +from griptape.structures import Agent +from griptape.tasks import PromptTask + +agent = Agent() +agent.add_task( + # take the first argument from the agent `run` method + PromptTask("Respond to the following request: {{ args[0] }}"), +) + +agent.run("Write me a haiku") diff --git a/docs/griptape-framework/structures/src/tasks_3.py b/docs/griptape-framework/structures/src/tasks_3.py new file mode 100644 index 000000000..6584049d0 --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_3.py @@ -0,0 +1,10 @@ +from pathlib import Path + +from griptape.loaders import ImageLoader +from griptape.structures import Agent + +agent = Agent() + +image_artifact = ImageLoader().load(Path("tests/resources/mountain.jpg").read_bytes()) + +agent.run([image_artifact, "What's in this image?"]) diff --git a/docs/griptape-framework/structures/src/tasks_4.py b/docs/griptape-framework/structures/src/tasks_4.py new file mode 100644 index 000000000..cd73b3ada --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_4.py @@ -0,0 +1,13 @@ +from griptape.structures import Agent +from griptape.tasks import ToolkitTask +from griptape.tools import FileManagerTool, PromptSummaryTool, WebScraperTool + +agent = Agent() +agent.add_task( + ToolkitTask( + "Load https://www.griptape.ai, summarize it, and store it in a file called griptape.txt", + tools=[WebScraperTool(off_prompt=True), FileManagerTool(off_prompt=True), 
PromptSummaryTool(off_prompt=True)], + ), +) + +agent.run() diff --git a/docs/griptape-framework/structures/src/tasks_5.py b/docs/griptape-framework/structures/src/tasks_5.py new file mode 100644 index 000000000..a0d537aa7 --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_5.py @@ -0,0 +1,10 @@ +from griptape.structures import Agent +from griptape.tasks import ToolTask +from griptape.tools import CalculatorTool + +# Initialize the agent and add a task +agent = Agent() +agent.add_task(ToolTask(tool=CalculatorTool())) + +# Run the agent with a prompt +agent.run("Give me the answer for 5*4.") diff --git a/docs/griptape-framework/structures/src/tasks_6.py b/docs/griptape-framework/structures/src/tasks_6.py new file mode 100644 index 000000000..a1b84e44d --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_6.py @@ -0,0 +1,29 @@ +from griptape.drivers import OpenAiChatPromptDriver +from griptape.engines import CsvExtractionEngine +from griptape.structures import Agent +from griptape.tasks import ExtractionTask + +# Instantiate the CSV extraction engine +csv_extraction_engine = CsvExtractionEngine(prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo")) + +# Define some unstructured data and columns +csv_data = """ +Alice, 28, lives in New York. +Bob, 35 lives in California. +Charlie is 40 and lives in Texas. +""" + +columns = ["Name", "Age", "Address"] + + +# Create an agent and add the ExtractionTask to it +agent = Agent() +agent.add_task( + ExtractionTask( + extraction_engine=csv_extraction_engine, + args={"column_names": columns}, + ) +) + +# Run the agent +agent.run(csv_data) diff --git a/docs/griptape-framework/structures/src/tasks_7.py b/docs/griptape-framework/structures/src/tasks_7.py new file mode 100644 index 000000000..909d00084 --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_7.py @@ -0,0 +1,29 @@ +from schema import Schema + +from griptape.drivers import OpenAiChatPromptDriver +from griptape.engines import JsonExtractionEngine +from griptape.structures import Agent +from griptape.tasks import ExtractionTask + +# Instantiate the json extraction engine +json_extraction_engine = JsonExtractionEngine( + prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo"), +) + +# Define some unstructured data and a schema +json_data = """ +Alice (Age 28) lives in New York. +Bob (Age 35) lives in California. +""" +user_schema = Schema({"users": [{"name": str, "age": int, "location": str}]}).json_schema("UserSchema") + +agent = Agent() +agent.add_task( + ExtractionTask( + extraction_engine=json_extraction_engine, + args={"template_schema": user_schema}, + ) +) + +# Run the agent +agent.run(json_data) diff --git a/docs/griptape-framework/structures/src/tasks_8.py b/docs/griptape-framework/structures/src/tasks_8.py new file mode 100644 index 000000000..cad73adf0 --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_8.py @@ -0,0 +1,24 @@ +from griptape.structures import Agent +from griptape.tasks import TextSummaryTask + +# Create a new agent +agent = Agent() + +# Add the TextSummaryTask to the agent +agent.add_task(TextSummaryTask()) + + +# Run the agent +agent.run( + "Artificial Intelligence (AI) is a branch of computer science that deals with " + "creating machines capable of thinking and learning. It encompasses various fields " + "such as machine learning, neural networks, and deep learning. AI has the potential " + "to revolutionize many sectors, including healthcare, finance, and transportation. 
" + "Our life in this modern age depends largely on computers. It is almost impossible " + "to think about life without computers. We need computers in everything that we use " + "in our daily lives. So it becomes very important to make computers intelligent so " + "that our lives become easy. Artificial Intelligence is the theory and development " + "of computers, which imitates the human intelligence and senses, such as visual " + "perception, speech recognition, decision-making, and translation between languages." + " Artificial Intelligence has brought a revolution in the world of technology. " +) diff --git a/docs/griptape-framework/structures/src/tasks_9.py b/docs/griptape-framework/structures/src/tasks_9.py new file mode 100644 index 000000000..1033f1b2f --- /dev/null +++ b/docs/griptape-framework/structures/src/tasks_9.py @@ -0,0 +1,39 @@ +from griptape.artifacts import TextArtifact +from griptape.drivers import LocalVectorStoreDriver, OpenAiChatPromptDriver, OpenAiEmbeddingDriver +from griptape.engines.rag import RagEngine +from griptape.engines.rag.modules import PromptResponseRagModule, VectorStoreRetrievalRagModule +from griptape.engines.rag.stages import ResponseRagStage, RetrievalRagStage +from griptape.structures import Agent +from griptape.tasks import RagTask + +# Initialize Embedding Driver and Vector Store Driver +vector_store_driver = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver()) + +artifacts = [ + TextArtifact("Griptape builds AI-powered applications that connect securely to your enterprise data and APIs."), + TextArtifact("Griptape Agents provide incredible power and flexibility when working with large language models."), +] +vector_store_driver.upsert_text_artifacts({"griptape": artifacts}) + +# Instantiate the agent and add RagTask with the RagEngine +agent = Agent() +agent.add_task( + RagTask( + "Respond to the following query: {{ args[0] }}", + rag_engine=RagEngine( + retrieval_stage=RetrievalRagStage( + retrieval_modules=[ + VectorStoreRetrievalRagModule( + vector_store_driver=vector_store_driver, query_params={"namespace": "griptape", "top_n": 20} + ) + ] + ), + response_stage=ResponseRagStage( + response_modules=[PromptResponseRagModule(prompt_driver=OpenAiChatPromptDriver(model="gpt-4o"))] + ), + ), + ) +) + +# Run the agent with a query string +agent.run("Give me information about Griptape") diff --git a/docs/griptape-framework/structures/src/workflows_1.py b/docs/griptape-framework/structures/src/workflows_1.py new file mode 100644 index 000000000..e6e57bafb --- /dev/null +++ b/docs/griptape-framework/structures/src/workflows_1.py @@ -0,0 +1,34 @@ +from griptape.structures import Workflow +from griptape.tasks import PromptTask +from griptape.utils import StructureVisualizer + +world_task = PromptTask( + "Create a fictional world based on the following key words {{ keywords|join(', ') }}", + context={"keywords": ["fantasy", "ocean", "tidal lock"]}, + id="world", +) + + +def character_task(task_id: str, character_name: str) -> PromptTask: + return PromptTask( + "Based on the following world description create a character named {{ name }}:\n{{ parent_outputs['world'] }}", + context={"name": character_name}, + id=task_id, + parent_ids=["world"], + ) + + +scotty_task = character_task("scotty", "Scotty") +annie_task = character_task("annie", "Annie") + +story_task = PromptTask( + "Based on the following description of the world and characters, write a short story:\n{{ parent_outputs['world'] }}\n{{ parent_outputs['scotty'] }}\n{{ 
parent_outputs['annie'] }}", + id="story", + parent_ids=["world", "scotty", "annie"], +) + +workflow = Workflow(tasks=[world_task, story_task, scotty_task, annie_task, story_task]) + +print(StructureVisualizer(workflow).to_url()) + +workflow.run() diff --git a/docs/griptape-framework/structures/src/workflows_2.py b/docs/griptape-framework/structures/src/workflows_2.py new file mode 100644 index 000000000..2811166ca --- /dev/null +++ b/docs/griptape-framework/structures/src/workflows_2.py @@ -0,0 +1,14 @@ +from griptape.rules import Rule +from griptape.structures import Workflow +from griptape.tasks import PromptTask + +workflow = Workflow( + tasks=[ + PromptTask("Name an animal", id="animal"), + PromptTask("Describe {{ parent_outputs['animal'] }} with an adjective", id="adjective", parent_ids=["animal"]), + PromptTask("Name a {{ parent_outputs['adjective'] }} animal", id="new-animal", parent_ids=["adjective"]), + ], + rules=[Rule("output a single lowercase word")], +) + +workflow.run() diff --git a/docs/griptape-framework/structures/src/workflows_3.py b/docs/griptape-framework/structures/src/workflows_3.py new file mode 100644 index 000000000..d65e7fc53 --- /dev/null +++ b/docs/griptape-framework/structures/src/workflows_3.py @@ -0,0 +1,16 @@ +from griptape.rules import Rule +from griptape.structures import Workflow +from griptape.tasks import PromptTask + +workflow = Workflow( + tasks=[ + PromptTask("Name an animal", id="animal", child_ids=["adjective"]), + PromptTask( + "Describe {{ parent_outputs['animal'] }} with an adjective", id="adjective", child_ids=["new-animal"] + ), + PromptTask("Name a {{ parent_outputs['adjective'] }} animal", id="new-animal"), + ], + rules=[Rule("output a single lowercase word")], +) + +workflow.run() diff --git a/docs/griptape-framework/structures/src/workflows_4.py b/docs/griptape-framework/structures/src/workflows_4.py new file mode 100644 index 000000000..c6fcb077f --- /dev/null +++ b/docs/griptape-framework/structures/src/workflows_4.py @@ -0,0 +1,19 @@ +from griptape.rules import Rule +from griptape.structures import Workflow +from griptape.tasks import PromptTask + +workflow = Workflow( + tasks=[ + PromptTask("Name an animal", id="animal"), + PromptTask( + "Describe {{ parent_outputs['animal'] }} with an adjective", + id="adjective", + parent_ids=["animal"], + child_ids=["new-animal"], + ), + PromptTask("Name a {{ parent_outputs['adjective'] }} animal", id="new-animal"), + ], + rules=[Rule("output a single lowercase word")], +) + +workflow.run() diff --git a/docs/griptape-framework/structures/src/workflows_5.py b/docs/griptape-framework/structures/src/workflows_5.py new file mode 100644 index 000000000..53c30e76a --- /dev/null +++ b/docs/griptape-framework/structures/src/workflows_5.py @@ -0,0 +1,17 @@ +from griptape.rules import Rule +from griptape.structures import Workflow +from griptape.tasks import PromptTask + +animal_task = PromptTask("Name an animal", id="animal") +adjective_task = PromptTask("Describe {{ parent_outputs['animal'] }} with an adjective", id="adjective") +new_animal_task = PromptTask("Name a {{ parent_outputs['adjective'] }} animal", id="new-animal") + +adjective_task.add_parent(animal_task) +new_animal_task.add_parent(adjective_task) + +workflow = Workflow( + tasks=[animal_task, adjective_task, new_animal_task], + rules=[Rule("output a single lowercase word")], +) + +workflow.run() diff --git a/docs/griptape-framework/structures/src/workflows_6.py b/docs/griptape-framework/structures/src/workflows_6.py new file mode 100644 index 
000000000..2f3467d83 --- /dev/null +++ b/docs/griptape-framework/structures/src/workflows_6.py @@ -0,0 +1,17 @@ +from griptape.rules import Rule +from griptape.structures import Workflow +from griptape.tasks import PromptTask + +animal_task = PromptTask("Name an animal", id="animal") +adjective_task = PromptTask("Describe {{ parent_outputs['animal'] }} with an adjective", id="adjective") +new_animal_task = PromptTask("Name a {{ parent_outputs['adjective'] }} animal", id="new-animal") + +animal_task.add_child(adjective_task) +adjective_task.add_child(new_animal_task) + +workflow = Workflow( + tasks=[animal_task, adjective_task, new_animal_task], + rules=[Rule("output a single lowercase word")], +) + +workflow.run() diff --git a/docs/griptape-framework/structures/src/workflows_7.py b/docs/griptape-framework/structures/src/workflows_7.py new file mode 100644 index 000000000..9ef291290 --- /dev/null +++ b/docs/griptape-framework/structures/src/workflows_7.py @@ -0,0 +1,18 @@ +from griptape.rules import Rule +from griptape.structures import Workflow +from griptape.tasks import PromptTask + +workflow = Workflow( + rules=[Rule("output a single lowercase word")], +) + +animal_task = PromptTask("Name an animal", id="animal", structure=workflow) +adjective_task = PromptTask( + "Describe {{ parent_outputs['animal'] }} with an adjective", id="adjective", structure=workflow +) +new_animal_task = PromptTask("Name a {{ parent_outputs['adjective'] }} animal", id="new-animal", structure=workflow) + +adjective_task.add_parent(animal_task) +adjective_task.add_child(new_animal_task) + +workflow.run() diff --git a/docs/griptape-framework/structures/src/workflows_8.py b/docs/griptape-framework/structures/src/workflows_8.py new file mode 100644 index 000000000..f8b8326ca --- /dev/null +++ b/docs/griptape-framework/structures/src/workflows_8.py @@ -0,0 +1,19 @@ +from griptape.rules import Rule +from griptape.structures import Workflow +from griptape.tasks import PromptTask + +animal_task = PromptTask("Name an animal", id="animal") +adjective_task = PromptTask( + "Describe {{ parent_outputs['animal'] }} with an adjective", id="adjective", parent_ids=["animal"] +) + + +new_animal_task = PromptTask("Name a {{ parent_outputs['adjective'] }} animal", id="new-animal") +new_animal_task.add_parent(adjective_task) + +workflow = Workflow( + tasks=[animal_task, adjective_task, new_animal_task], + rules=[Rule("output a single lowercase word")], +) + +workflow.run() diff --git a/docs/griptape-framework/structures/src/workflows_9.py b/docs/griptape-framework/structures/src/workflows_9.py new file mode 100644 index 000000000..7ecd9233e --- /dev/null +++ b/docs/griptape-framework/structures/src/workflows_9.py @@ -0,0 +1,23 @@ +from griptape.rules import Rule +from griptape.structures import Workflow +from griptape.tasks import PromptTask + +workflow = Workflow( + rules=[Rule("output a single lowercase word")], +) + +animal_task = PromptTask("Name an animal", id="animal") +adjective_task = PromptTask("Describe {{ parent_outputs['animal'] }} with an adjective", id="adjective") +color_task = PromptTask("Describe {{ parent_outputs['animal'] }} with a color", id="color") +new_animal_task = PromptTask("Name an animal described as: \n{{ parents_output_text }}", id="new-animal") + +# The following workflow runs animal_task, then (adjective_task, and color_task) +# in parallel, then finally new_animal_task. 
+# +# In other words, the output of animal_task is passed to both adjective_task and color_task +# and the outputs of adjective_task and color_task are then passed to new_animal_task. +workflow.add_task(animal_task) +workflow.add_task(new_animal_task) +workflow.insert_tasks(animal_task, [adjective_task, color_task], new_animal_task) + +workflow.run() diff --git a/docs/griptape-framework/structures/task-memory.md b/docs/griptape-framework/structures/task-memory.md index ea4a787f6..a3fc04dc5 100644 --- a/docs/griptape-framework/structures/task-memory.md +++ b/docs/griptape-framework/structures/task-memory.md @@ -22,114 +22,110 @@ When `off_prompt` is set to `True`, the Tool will store its output in Task Memor Lets look at a simple example where `off_prompt` is set to `False`: ```python -from griptape.structures import Agent -from griptape.tools import Calculator - -# Create an agent with the Calculator tool -agent = Agent( - tools=[Calculator(off_prompt=False)] -) - -agent.run("What is 10 raised to the power of 5?") +--8<-- "docs/griptape-framework/structures/src/task_memory_1.py" ``` ``` [04/26/24 13:06:42] INFO ToolkitTask 36b9dea13b9d479fb752014f41dca54c Input: What is the square root of 12345? [04/26/24 13:06:48] INFO Subtask a88c0feeaef6493796a9148ed68c9caf - Thought: To find the square root of 12345, I can use the Calculator action with the expression "12345 ** 0.5". - Actions: [{"name": "Calculator", "path": "calculate", "input": {"values": {"expression": "12345 ** 0.5"}}, "tag": "sqrt_12345"}] + Thought: To find the square root of 12345, I can use the CalculatorTool action with the expression "12345 ** 0.5". + Actions: [{"name": "CalculatorTool", "path": "calculate", "input": {"values": {"expression": "12345 ** 0.5"}}, "tag": "sqrt_12345"}] INFO Subtask a88c0feeaef6493796a9148ed68c9caf Response: 111.1080555135405 [04/26/24 13:06:49] INFO ToolkitTask 36b9dea13b9d479fb752014f41dca54c Output: The square root of 12345 is approximately 111.108. ``` -Since the result of the Calculator Tool is neither sensitive nor too large, we can set `off_prompt` to `False` and not use Task Memory. +Since the result of the CalculatorTool Tool is neither sensitive nor too large, we can set `off_prompt` to `False` and not use Task Memory. Let's explore what happens when `off_prompt` is set to `True`: ```python -from griptape.structures import Agent -from griptape.tools import Calculator - -# Create an agent with the Calculator tool -agent = Agent( - tools=[Calculator(off_prompt=True)] -) - -agent.run("What is 10 raised to the power of 5?") +--8<-- "docs/griptape-framework/structures/src/task_memory_2.py" ``` ``` [04/26/24 13:07:02] INFO ToolkitTask ecbb788d9830491ab72a8a2bbef5fb0a Input: What is the square root of 12345? [04/26/24 13:07:10] INFO Subtask 4700dc0c2e934d1a9af60a28bd770bc6 - Thought: To find the square root of a number, we can use the Calculator action with the expression "sqrt(12345)". However, the Calculator + Thought: To find the square root of a number, we can use the CalculatorTool action with the expression "sqrt(12345)". However, the CalculatorTool action only supports basic arithmetic operations and does not support the sqrt function. Therefore, we need to use the equivalent expression for square root which is raising the number to the power of 0.5. 
-                             Actions: [{"name": "Calculator", "path": "calculate", "input": {"values": {"expression": "12345**0.5"}}, "tag": "sqrt_calculation"}]
+                             Actions: [{"name": "CalculatorTool", "path": "calculate", "input": {"values": {"expression": "12345**0.5"}}, "tag": "sqrt_calculation"}]
                     INFO     Subtask 4700dc0c2e934d1a9af60a28bd770bc6
-                             Response: Output of "Calculator.calculate" was stored in memory with memory_name "TaskMemory" and artifact_namespace
+                             Response: Output of "CalculatorTool.calculate" was stored in memory with memory_name "TaskMemory" and artifact_namespace
                              "6be74c5128024c0588eb9bee1fdb9aa5"
 [04/26/24 13:07:16] ERROR    Subtask ecbb788d9830491ab72a8a2bbef5fb0a
-                             Invalid action JSON: Or({Literal("name", description=""): 'Calculator', Literal("path", description="Can be used for computing simple
+                             Invalid action JSON: Or({Literal("name", description=""): 'CalculatorTool', Literal("path", description="Can be used for computing simple
                              numerical or algebraic calculations in Python"): 'calculate', Literal("input", description=""): {'values': Schema({Literal("expression",
                              description="Arithmetic expression parsable in pure Python. Single line only. Don't use variables. Don't use any imports or external
                              libraries"): })}, Literal("tag", description="Unique tag name for action execution."): }) did not validate {'name': 'Memory', 'path':
                              'get', 'input': {'memory_name': 'TaskMemory', 'artifact_namespace': '6be74c5128024c0588eb9bee1fdb9aa5'}, 'tag': 'get_sqrt_result'}
                              Key 'name' error:
-                             'Calculator' does not match 'Memory'
+                             'CalculatorTool' does not match 'Memory'

                              ...Output truncated for brevity...
 ```

 When we set `off_prompt` to `True`, the Agent does not function as expected, even generating an error. This is because the Calculator output is being stored
 in Task Memory but the Agent has no way to access it.

-To fix this, we need a [Tool that can read from Task Memory](#tools-that-can-read-from-task-memory) such as the `TaskMemoryClient`.
+To fix this, we need a [Tool that can read from Task Memory](#tools-that-can-read-from-task-memory) such as the `PromptSummaryTool`.
 This is an example of [not providing a Task Memory compatible Tool](#not-providing-a-task-memory-compatible-tool).

-## Task Memory Client
+## Prompt Summary Tool

-The [TaskMemoryClient](../../griptape-tools/official-tools/task-memory-client.md) is a Tool that allows an Agent to interact with Task Memory. It has the following methods:
+The [PromptSummaryTool](../../griptape-tools/official-tools/prompt-summary-tool.md) is a Tool that allows an Agent to summarize the Artifacts in Task Memory.

-- `query`: Retrieve the content of an Artifact stored in Task Memory.
-- `summarize`: Summarize the content of an Artifact stored in Task Memory.
-
-Let's add `TaskMemoryClient` to the Agent and run the same task.
-Note that on the `TaskMemoryClient` we've set `off_prompt` to `False` so that the results of the query can be returned directly to the LLM.
+Let's add `PromptSummaryTool` to the Agent and run the same task.
+Note that on the `PromptSummaryTool` we've set `off_prompt` to `False` so that the results of the query can be returned directly to the LLM.
 If we had kept it as `True`, the results would have been stored back Task Memory which would've put us back to square one.
 See [Task Memory Looping](#task-memory-looping) for more information on this scenario.
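+
+For reference, a minimal sketch of this setup; the `task_memory_3.py` snippet included below is not visible in this diff, so treat the exact wiring as an assumption:
+
+```python
+from griptape.structures import Agent
+from griptape.tools import CalculatorTool, PromptSummaryTool
+
+# CalculatorTool keeps its raw output off the prompt by storing it in Task Memory;
+# PromptSummaryTool reads that Artifact back and returns a summary to the LLM.
+agent = Agent(tools=[CalculatorTool(off_prompt=True), PromptSummaryTool(off_prompt=False)])
+
+agent.run("What is the square root of 12345?")
+```
+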
```python -from griptape.structures import Agent -from griptape.tools import Calculator, TaskMemoryClient - -# Create an agent with the Calculator tool -agent = Agent(tools=[Calculator(off_prompt=True), TaskMemoryClient(off_prompt=False)]) - -agent.run("What is the square root of 12345?") +--8<-- "docs/griptape-framework/structures/src/task_memory_3.py" ``` ``` -[04/26/24 13:13:01] INFO ToolkitTask 5b46f9ef677c4b31906b48aba3f45e2c +[08/12/24 14:54:04] INFO ToolkitTask f7ebd8acc3d64e3ca9db82ef9ec4e65f Input: What is the square root of 12345? -[04/26/24 13:13:07] INFO Subtask 611d98ea5576430fbc63259420577ab2 - Thought: To find the square root of 12345, I can use the Calculator action with the expression "12345 ** 0.5". - Actions: [{"name": "Calculator", "path": "calculate", "input": {"values": {"expression": "12345 ** 0.5"}}, "tag": "sqrt_12345"}] -[04/26/24 13:13:08] INFO Subtask 611d98ea5576430fbc63259420577ab2 +[08/12/24 14:54:05] INFO Subtask 777693d039e74ed288f663742fdde2ea + Actions: [ + { + "tag": "call_DXSs19G27VOV7EmP3PoRwGZI", + "name": "Calculator", + "path": "calculate", + "input": { + "values": { + "expression": "12345 ** 0.5" + } + } + } + ] + INFO Subtask 777693d039e74ed288f663742fdde2ea Response: Output of "Calculator.calculate" was stored in memory with memory_name "TaskMemory" and artifact_namespace - "7554b69e1d414a469b8882e2266dcea1" -[04/26/24 13:13:15] INFO Subtask 32b9163a15644212be60b8fba07bd23b - Thought: The square root of 12345 has been calculated and stored in memory. I can retrieve this value using the TaskMemoryClient action with - the query path, providing the memory_name and artifact_namespace as input. - Actions: [{"tag": "retrieve_sqrt", "name": "TaskMemoryClient", "path": "query", "input": {"values": {"memory_name": "TaskMemory", - "artifact_namespace": "7554b69e1d414a469b8882e2266dcea1", "query": "What is the result of the calculation?"}}}] -[04/26/24 13:13:16] INFO Subtask 32b9163a15644212be60b8fba07bd23b - Response: The result of the calculation is 111.1080555135405. -[04/26/24 13:13:17] INFO ToolkitTask 5b46f9ef677c4b31906b48aba3f45e2c + "370853a8937f4dd7a9e923254459cff2" +[08/12/24 14:54:06] INFO Subtask c8394ca51f1f4ae1b715618a2c5c8120 + Actions: [ + { + "tag": "call_qqpsWEvAUGIcPLrwAHGuH6o3", + "name": "PromptSummaryTool", + "path": "summarize", + "input": { + "values": { + "summary": { + "memory_name": "TaskMemory", + "artifact_namespace": "370853a8937f4dd7a9e923254459cff2" + } + } + } + } + ] +[08/12/24 14:54:07] INFO Subtask c8394ca51f1f4ae1b715618a2c5c8120 + Response: The text contains a single numerical value: 111.1080555135405. +[08/12/24 14:54:08] INFO ToolkitTask f7ebd8acc3d64e3ca9db82ef9ec4e65f Output: The square root of 12345 is approximately 111.108. ``` -While this fixed the problem, it took a handful more steps than when we just had `Calculator()`. Something like a basic calculation is an instance of where [Task Memory may not be necessary](#task-memory-may-not-be-necessary). +While this fixed the problem, it took a handful more steps than when we just had `CalculatorTool()`. Something like a basic calculation is an instance of where [Task Memory may not be necessary](#task-memory-may-not-be-necessary). Let's look at a more complex example where Task Memory shines. ## Large Data @@ -137,15 +133,7 @@ Let's look at a more complex example where Task Memory shines. Let's say we want to query the contents of a very large webpage. 
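+
+A minimal sketch of the failing configuration; the `task_memory_4.py` snippet included below is not visible in this diff, so its exact contents are an assumption:
+
+```python
+from griptape.structures import Agent
+from griptape.tools import WebScraperTool
+
+# With the default off_prompt=False, the scraped page is returned directly to the LLM,
+# which is what overflows the model's context window in the error shown below.
+agent = Agent(tools=[WebScraperTool(off_prompt=False)])
+
+agent.run(
+    "According to this page https://en.wikipedia.org/wiki/Elden_Ring, how many copies of Elden Ring have been sold?"
+)
+```
+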
```python -from griptape.structures import Agent -from griptape.tools import WebScraper - -# Create an agent with the WebScraper tool -agent = Agent(tools=[WebScraper()]) - -agent.run( - "According to this page https://en.wikipedia.org/wiki/Elden_Ring, how many copies of Elden Ring have been sold?" -) +--8<-- "docs/griptape-framework/structures/src/task_memory_4.py" ``` When running this example, we get the following error: @@ -155,172 +143,128 @@ When running this example, we get the following error: Please reduce the length of the messages.", 'type': 'invalid_request_error', 'param': 'messages', 'code': 'context_length_exceeded'}} ``` -This is because the content of the webpage is too large to fit in the LLM's input token limit. We can fix this by storing the content in Task Memory, and then querying it with the `TaskMemoryClient`. -Note that we're setting `off_prompt` to `False` on the `TaskMemoryClient` so that the _queried_ content can be returned directly to the LLM. +This is because the content of the webpage is too large to fit in the LLM's input token limit. We can fix this by storing the content in Task Memory, and then querying it with the `QueryTool`. +Note that we're setting `off_prompt` to `False` on the `QueryTool` so that the _queried_ content can be returned directly to the LLM. ```python -from griptape.structures import Agent -from griptape.tools import WebScraper, TaskMemoryClient - -agent = Agent( - tools=[ - WebScraper(off_prompt=True), - TaskMemoryClient(off_prompt=False), - ] -) - -agent.run( - "According to this page https://en.wikipedia.org/wiki/Elden_Ring, how many copies of Elden Ring have been sold?" -) +--8<-- "docs/griptape-framework/structures/src/task_memory_5.py" ``` And now we get the expected output: ``` -[04/26/24 13:51:51] INFO ToolkitTask 7aca20f202df47a2b9848ed7025f9c21 +[08/12/24 14:56:18] INFO ToolkitTask d3ce58587dc944b0a30a205631b82944 Input: According to this page https://en.wikipedia.org/wiki/Elden_Ring, how many copies of Elden Ring have been sold? -[04/26/24 13:51:58] INFO Subtask 5b21d8ead32b4644abcd1e852bb5f512 - Thought: I need to scrape the content of the provided URL to find the information about how many copies of Elden Ring have been sold. - Actions: [{"name": "WebScraper", "path": "get_content", "input": {"values": {"url": "https://en.wikipedia.org/wiki/Elden_Ring"}}, "tag": - "scrape_elden_ring"}] -[04/26/24 13:52:04] INFO Subtask 5b21d8ead32b4644abcd1e852bb5f512 - Response: Output of "WebScraper.get_content" was stored in memory with memory_name "TaskMemory" and artifact_namespace - "2d4ebc7211074bb7be26613eb25d8fc1" -[04/26/24 13:52:11] INFO Subtask f12eb3d3b4924e4085808236b460b43d - Thought: Now that the webpage content is stored in memory, I need to query this memory to find the information about how many copies of Elden - Ring have been sold. - Actions: [{"tag": "query_sales", "name": "TaskMemoryClient", "path": "query", "input": {"values": {"memory_name": "TaskMemory", - "artifact_namespace": "2d4ebc7211074bb7be26613eb25d8fc1", "query": "How many copies of Elden Ring have been sold?"}}}] -[04/26/24 13:52:14] INFO Subtask f12eb3d3b4924e4085808236b460b43d - Response: Elden Ring sold 23 million copies by February 2024. -[04/26/24 13:52:15] INFO ToolkitTask 7aca20f202df47a2b9848ed7025f9c21 - Output: Elden Ring sold 23 million copies by February 2024. 
+[08/12/24 14:56:20] INFO Subtask 494850ec40fe474c83d48b5620c5dcbb + Actions: [ + { + "tag": "call_DGsOHC4AVxhV7RPVA7q3rATX", + "name": "WebScraperTool", + "path": "get_content", + "input": { + "values": { + "url": "https://en.wikipedia.org/wiki/Elden_Ring" + } + } + } + ] +[08/12/24 14:56:25] INFO Subtask 494850ec40fe474c83d48b5620c5dcbb + Response: Output of "WebScraperTool.get_content" was stored in memory with memory_name "TaskMemory" and artifact_namespace + "b9f53d6d9b35455aaf4d99719c1bfffa" +[08/12/24 14:56:26] INFO Subtask 8669ee523bb64550850566011bcd14e2 + Actions: [ + { + "tag": "call_DGsOHC4AVxhV7RPVA7q3rATX", + "name": "QueryTool", + "path": "search", + "input": { + "values": { + "query": "number of copies sold", + "content": { + "memory_name": "TaskMemory", + "artifact_namespace": "b9f53d6d9b35455aaf4d99719c1bfffa" + } + } + } + } + ] +[08/12/24 14:56:29] INFO Subtask 8669ee523bb64550850566011bcd14e2 + Response: "Elden Ring" sold 13.4 million copies worldwide by the end of March 2022 and 25 million by June 2024. The downloadable content (DLC) + "Shadow of the Erdtree" sold five million copies within three days of its release. +[08/12/24 14:56:30] INFO ToolkitTask d3ce58587dc944b0a30a205631b82944 + Output: Elden Ring sold 13.4 million copies worldwide by the end of March 2022 and 25 million by June 2024. ``` ## Sensitive Data Because Task Memory splits up the storage and retrieval of data, you can use different models for each step. -Here is an example where we use GPT-4 to orchestrate the Tools and store the data in Task Memory, and Amazon Bedrock's Titan model to query the raw content. -In this example, GPT-4 _never_ sees the contents of the page, only that it was stored in Task Memory. Even the query results generated by the Titan model are stored in Task Memory so that the `FileManager` can save the results to disk without GPT-4 ever seeing them. +Here is an example where we use GPT-4 to orchestrate the Tools and store the data in Task Memory, and Anthropic's Claude 3 Haiku model to query the raw content. +In this example, GPT-4 _never_ sees the contents of the page, only that it was stored in Task Memory. Even the query results generated by the Haiku model are stored in Task Memory so that the `FileManagerTool` can save the results to disk without GPT-4 ever seeing them. 
```python -from griptape.artifacts import TextArtifact -from griptape.config import ( - OpenAiStructureConfig, -) -from griptape.drivers import ( - LocalVectorStoreDriver, - OpenAiChatPromptDriver, OpenAiEmbeddingDriver, -) -from griptape.engines.rag import RagEngine -from griptape.engines.rag.modules import VectorStoreRetrievalRagModule, PromptResponseRagModule -from griptape.engines.rag.stages import RetrievalRagStage, ResponseRagStage -from griptape.memory import TaskMemory -from griptape.memory.task.storage import TextArtifactStorage -from griptape.structures import Agent -from griptape.tools import FileManager, TaskMemoryClient, WebScraper - -vector_store_driver = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver()) - -agent = Agent( - config=OpenAiStructureConfig( - prompt_driver=OpenAiChatPromptDriver(model="gpt-4"), - ), - task_memory=TaskMemory( - artifact_storages={ - TextArtifact: TextArtifactStorage( - rag_engine=RagEngine( - retrieval_stage=RetrievalRagStage( - retrieval_modules=[ - VectorStoreRetrievalRagModule( - - vector_store_driver=vector_store_driver, - query_params={ - "namespace": "griptape", - "count": 20 - } - ) - ] - ), - response_stage=ResponseRagStage( - response_module=PromptResponseRagModule( - prompt_driver=OpenAiChatPromptDriver(model="gpt-4o") - ) - ) - ), - retrieval_rag_module_name="VectorStoreRetrievalRagModule", - vector_store_driver=vector_store_driver - ) - } - ), - tools=[ - WebScraper(off_prompt=True), - TaskMemoryClient(off_prompt=True, allowlist=["query"]), - FileManager(off_prompt=True), - ], -) - -agent.run( - "Use this page https://en.wikipedia.org/wiki/Elden_Ring to find how many copies of Elden Ring have been sold, and then save the result to a file." -) +--8<-- "docs/griptape-framework/structures/src/task_memory_6.py" ``` ``` -[06/21/24 16:00:01] INFO ToolkitTask 17f30ac14701490c8ef71508f420ea9f - Input: Use this page - https://en.wikipedia.org/wiki/Elden_Ring to find - how many copies of Elden Ring have been sold, and - then save the result to a file. -[06/21/24 16:00:05] INFO Subtask cb06889205334ec9afd7e97f7f231ab5 - Thought: First, I need to scrape the content of the - provided URL to find the information about how many - copies of Elden Ring have been sold. Then, I will - save this information to a file. - - Actions: [{"name": "WebScraper", "path": - "get_content", "input": {"values": {"url": - "https://en.wikipedia.org/wiki/Elden_Ring"}}, - "tag": "scrape_elden_ring"}] -[06/21/24 16:00:12] INFO Subtask cb06889205334ec9afd7e97f7f231ab5 - Response: Output of "WebScraper.get_content" was - stored in memory with memory_name "TaskMemory" and - artifact_namespace - "7e48bcff0da94ad3b06aa4e173f8f37b" -[06/21/24 16:00:17] INFO Subtask 56102d42475d413299ce52a0230506b7 - Thought: Now that the webpage content is stored in - memory, I need to query this memory to find the - information about how many copies of Elden Ring - have been sold. 
- Actions: [{"tag": "query_sales", "name": - "TaskMemoryClient", "path": "query", "input": - {"values": {"memory_name": "TaskMemory", - "artifact_namespace": - "7e48bcff0da94ad3b06aa4e173f8f37b", "query": "How - many copies of Elden Ring have been sold?"}}}] -[06/21/24 16:00:19] INFO Subtask 56102d42475d413299ce52a0230506b7 - Response: Output of "TaskMemoryClient.query" was - stored in memory with memory_name "TaskMemory" and - artifact_namespace - "9ecf4d7b7d0c46149dfc46ba236f178e" -[06/21/24 16:00:25] INFO Subtask ed2921791dcf46b68c9d8d2f8dbeddbd - Thought: Now that I have the sales information - stored in memory, I need to save this information - to a file. - Actions: [{"tag": "save_sales_info", "name": - "FileManager", "path": - "save_memory_artifacts_to_disk", "input": - {"values": {"dir_name": "sales_info", "file_name": - "elden_ring_sales.txt", "memory_name": - "TaskMemory", "artifact_namespace": - "9ecf4d7b7d0c46149dfc46ba236f178e"}}}] - INFO Subtask ed2921791dcf46b68c9d8d2f8dbeddbd - Response: Successfully saved memory artifacts to - disk -[06/21/24 16:00:27] INFO ToolkitTask 17f30ac14701490c8ef71508f420ea9f - Output: The information about how many copies of - Elden Ring have been sold has been successfully - saved to the file "elden_ring_sales.txt" in the - "sales_info" directory. +[08/12/24 14:55:21] INFO ToolkitTask 329b1abc760e4d30bbf23e349451d930 + Input: Use this page https://en.wikipedia.org/wiki/Elden_Ring to find how many copies of Elden Ring have been sold, and then save the result to + a file. +[08/12/24 14:55:23] INFO Subtask 26205b5623174424b618abafd886c4d8 + Actions: [ + { + "tag": "call_xMK0IyFZFbjlTapK7AA6kbNq", + "name": "WebScraperTool", + "path": "get_content", + "input": { + "values": { + "url": "https://en.wikipedia.org/wiki/Elden_Ring" + } + } + } + ] +[08/12/24 14:55:28] INFO Subtask 26205b5623174424b618abafd886c4d8 + Response: Output of "WebScraperTool.get_content" was stored in memory with memory_name "TaskMemory" and artifact_namespace + "44b8f230645148d0b8d44354c0f2df5b" +[08/12/24 14:55:31] INFO Subtask d8b4cf297a0d4d9db04e4f8e63b746c8 + Actions: [ + { + "tag": "call_Oiqq6oI20yqmdNrH9Mawb2fS", + "name": "QueryTool", + "path": "search", + "input": { + "values": { + "query": "copies sold", + "content": { + "memory_name": "TaskMemory", + "artifact_namespace": "44b8f230645148d0b8d44354c0f2df5b" + } + } + } + } + ] +[08/12/24 14:55:34] INFO Subtask d8b4cf297a0d4d9db04e4f8e63b746c8 + Response: Output of "QueryTool.search" was stored in memory with memory_name "TaskMemory" and artifact_namespace + "fd828ddd629e4974a7837f9dfde65954" +[08/12/24 14:55:38] INFO Subtask 7aafcb3fb0d845858e2fcf9b8dc8a7ec + Actions: [ + { + "tag": "call_nV1DIPAEhUEAVMCjXND0pKoS", + "name": "FileManagerTool", + "path": "save_memory_artifacts_to_disk", + "input": { + "values": { + "dir_name": "results", + "file_name": "elden_ring_sales.txt", + "memory_name": "TaskMemory", + "artifact_namespace": "fd828ddd629e4974a7837f9dfde65954" + } + } + } + ] + INFO Subtask 7aafcb3fb0d845858e2fcf9b8dc8a7ec + Response: Successfully saved memory artifacts to disk +[08/12/24 14:55:40] INFO ToolkitTask 329b1abc760e4d30bbf23e349451d930 + Output: Successfully saved the number of copies sold of Elden Ring to a file named "elden_ring_sales.txt" in the "results" directory. 
 ```

 ## Tools That Can Read From Task Memory
@@ -329,11 +273,10 @@ As seen in the previous example, certain Tools are designed to read directly fro

 Today, these include:

-- [TaskMemoryClient](../../griptape-tools/official-tools/task-memory-client.md)
-- [FileManager](../../griptape-tools/official-tools/file-manager.md)
-- [AwsS3Client](../../griptape-tools/official-tools/aws-s3-client.md)
-- [GoogleDriveClient](../../griptape-tools/official-tools/google-drive-client.md)
-- [GoogleDocsClient](../../griptape-tools/official-tools/google-docs-client.md)
+- [PromptSummaryTool](../../griptape-tools/official-tools/prompt-summary-tool.md)
+- [ExtractionTool](../../griptape-tools/official-tools/extraction-tool.md)
+- [RagTool](../../griptape-tools/official-tools/rag-tool.md)
+- [FileManagerTool](../../griptape-tools/official-tools/file-manager-tool.md)

 ## Task Memory Considerations

@@ -348,16 +291,7 @@ By default, Griptape will store `TextArtifact`'s, `BlobArtifact`'s in Task Memor
 When using Task Memory, make sure that you have at least one Tool that can read from Task Memory. If you don't, the data stored in Task Memory will be inaccessible to the Agent and it may hallucinate Tool Activities.

 ```python
-from griptape.structures import Agent
-from griptape.tools import WebScraper
-
-agent = Agent(
-    tools=[
-        WebScraper(off_prompt=True) # `off_prompt=True` will store the data in Task Memory
-        # Missing a Tool that can read from Task Memory
-    ]
-)
-agent.run("According to this page https://en.wikipedia.org/wiki/San_Francisco, what is the population of San Francisco?")
+--8<-- "docs/griptape-framework/structures/src/task_memory_7.py"
 ```

 ### Task Memory Looping
```python -from griptape.structures import Agent -from griptape.tools import Calculator - -agent = Agent( - tools=[ - Calculator() # Default value of `off_prompt=False` will return the data directly to the LLM - ] -) -agent.run("What is 10 ^ 3, 55 / 23, and 12345 * 0.5?") +--8<-- "docs/griptape-framework/structures/src/task_memory_9.py" ``` diff --git a/docs/griptape-framework/structures/tasks.md b/docs/griptape-framework/structures/tasks.md index 6d479578b..f91937ec0 100644 --- a/docs/griptape-framework/structures/tasks.md +++ b/docs/griptape-framework/structures/tasks.md @@ -18,19 +18,7 @@ Within the [input](../../reference/griptape/tasks/base_text_input_task.md#gripta Additional [context](../../reference/griptape/structures/structure.md#griptape.structures.structure.Structure.context) variables may be added based on the Structure running the task. ```python -from griptape.structures import Agent -from griptape.tasks import PromptTask - - -agent = Agent() -agent.add_task( - PromptTask( - "Respond to the user's following question '{{ args[0] }}' in the language '{{preferred_language}}' and tone '{{tone}}'.", - context={"preferred_language": "ENGLISH", "tone": "PLAYFUL"}, - ) -) - -agent.run("How do I bake a cake?") +--8<-- "docs/griptape-framework/structures/src/tasks_1.py" ``` ``` @@ -70,17 +58,7 @@ agent.run("How do I bake a cake?") For general purpose prompting, use the [PromptTask](../../reference/griptape/tasks/prompt_task.md): ```python -from griptape.tasks import PromptTask -from griptape.structures import Agent - - -agent = Agent() -agent.add_task( - # take the first argument from the agent `run` method - PromptTask("Respond to the following request: {{ args[0] }}"), -) - -agent.run("Write me a haiku") +--8<-- "docs/griptape-framework/structures/src/tasks_2.py" ``` ``` @@ -96,14 +74,7 @@ agent.run("Write me a haiku") If the model supports it, you can also pass image inputs: ```python -from griptape.structures import Agent -from griptape.loaders import ImageLoader - -agent = Agent() -with open("tests/resources/mountain.jpg", "rb") as f: - image_artifact = ImageLoader().load(f.read()) - -agent.run([image_artifact, "What's in this image?"]) +--8<-- "docs/griptape-framework/structures/src/tasks_3.py" ``` ``` @@ -124,56 +95,67 @@ To use [Griptape Tools](../../griptape-framework/tools/index.md), use a [Toolkit This Task takes in one or more Tools which the LLM will decide to use through Chain of Thought (CoT) reasoning. Because this Task uses CoT, it is recommended to only use with very capable models. ```python -from griptape.tasks import ToolkitTask -from griptape.structures import Agent -from griptape.tools import WebScraper, FileManager, TaskMemoryClient - - -agent = Agent() -agent.add_task( - ToolkitTask( - "Load https://www.griptape.ai, summarize it, and store it in a file called griptape.txt", - tools=[WebScraper(off_prompt=True), FileManager(off_prompt=True), TaskMemoryClient(off_prompt=True)] - ), -) - -agent.run() +--8<-- "docs/griptape-framework/structures/src/tasks_4.py" ``` ``` -[09/08/23 11:14:55] INFO ToolkitTask 22af656c6ad643e188fe80f9378dfff9 +[08/12/24 15:16:30] INFO ToolkitTask f5b44fe1dadc4e6688053df71d97e0de Input: Load https://www.griptape.ai, summarize it, and store it in a file called griptape.txt -[09/08/23 11:15:02] INFO Subtask 7a6356470e6a4b08b61edc5591b37f0c - Thought: The first step is to load the webpage using the WebScraper tool's get_content activity. 
- - Action: {"name": "WebScraper", "path": "get_content", "input": {"values": {"url": - "https://www.griptape.ai"}}} -[09/08/23 11:15:03] INFO Subtask 7a6356470e6a4b08b61edc5591b37f0c - Response: Output of "WebScraper.get_content" was stored in memory with memory_name "TaskMemory" and - artifact_namespace "2b50373849d140f698ba8071066437ee" -[09/08/23 11:15:11] INFO Subtask a22a7e4ebf594b4b895fcbe8a95c1dd3 - Thought: Now that the webpage content is stored in memory, I can use the TaskMemory tool's summarize activity - to summarize it. - Action: {"name": "TaskMemoryClient", "path": "summarize", "input": {"values": {"memory_name": "TaskMemory", "artifact_namespace": "2b50373849d140f698ba8071066437ee"}}} -[09/08/23 11:15:15] INFO Subtask a22a7e4ebf594b4b895fcbe8a95c1dd3 - Response: Griptape is an open source framework that allows developers to build and deploy AI applications - using large language models (LLMs). It provides the ability to create conversational and event-driven apps that - can access and manipulate data securely. Griptape enforces structures like sequential pipelines and DAG-based - workflows for predictability, while also allowing for creativity by safely prompting LLMs with external APIs and - data stores. The framework can be used to create AI systems that operate across both dimensions. Griptape Cloud - is a managed platform for deploying and managing AI apps, and it offers features like scheduling and connecting - to data stores and APIs. -[09/08/23 11:15:27] INFO Subtask 7afb3d44d0114b7f8ef2dac4314a8e90 - Thought: Now that I have the summary, I can use the FileManager tool's save_file_to_disk activity to store the - summary in a file named griptape.txt. - Action: {"name": "FileManager", "path": "save_file_to_disk", "input": {"values": - {"memory_name": "TaskMemory", "artifact_namespace": "2b50373849d140f698ba8071066437ee", "path": - "griptape.txt"}}} - INFO Subtask 7afb3d44d0114b7f8ef2dac4314a8e90 - Response: saved successfully -[09/08/23 11:15:31] INFO ToolkitTask 22af656c6ad643e188fe80f9378dfff9 - Output: The summary of the webpage https://www.griptape.ai has been successfully stored in a file named - griptape.txt. 
+[08/12/24 15:16:32] INFO Subtask a4483eddfbe84129b0f4c04ef0f5d695 + Actions: [ + { + "tag": "call_AFeOL9MGhZ4mPFCULcBEm4NQ", + "name": "WebScraperTool", + "path": "get_content", + "input": { + "values": { + "url": "https://www.griptape.ai" + } + } + } + ] + INFO Subtask a4483eddfbe84129b0f4c04ef0f5d695 + Response: Output of "WebScraperTool.get_content" was stored in memory with memory_name "TaskMemory" and artifact_namespace + "c6a6bcfc16f34481a068108aeaa6838e" +[08/12/24 15:16:33] INFO Subtask ee5f11666ded4dc39b94e4c59d18fbc7 + Actions: [ + { + "tag": "call_aT7DX0YSQPmOcnumWXrGoMNt", + "name": "PromptSummaryTool", + "path": "summarize", + "input": { + "values": { + "summary": { + "memory_name": "TaskMemory", + "artifact_namespace": "c6a6bcfc16f34481a068108aeaa6838e" + } + } + } + } + ] +[08/12/24 15:16:37] INFO Subtask ee5f11666ded4dc39b94e4c59d18fbc7 + Response: Output of "PromptSummaryTool.summarize" was stored in memory with memory_name "TaskMemory" and artifact_namespace + "669d29a704444176be93d09d014298df" +[08/12/24 15:16:38] INFO Subtask d9b2dd9f96d841f49f5d460e33905183 + Actions: [ + { + "tag": "call_QgMk1M1UuD6DAnxjfQz1MH6X", + "name": "FileManagerTool", + "path": "save_memory_artifacts_to_disk", + "input": { + "values": { + "dir_name": ".", + "file_name": "griptape.txt", + "memory_name": "TaskMemory", + "artifact_namespace": "669d29a704444176be93d09d014298df" + } + } + } + ] + INFO Subtask d9b2dd9f96d841f49f5d460e33905183 + Response: Successfully saved memory artifacts to disk +[08/12/24 15:16:39] INFO ToolkitTask f5b44fe1dadc4e6688053df71d97e0de + Output: The content from https://www.griptape.ai has been summarized and stored in a file called `griptape.txt`. ``` ## Tool Task @@ -182,16 +164,7 @@ Another way to use [Griptape Tools](../../griptape-framework/tools/index.md), is This Task takes in a single Tool which the LLM will use without Chain of Thought (CoT) reasoning. Because this Task does not use CoT, it is better suited for less capable models. ```python -from griptape.structures import Agent -from griptape.tasks import ToolTask -from griptape.tools import Calculator - -# Initialize the agent and add a task -agent = Agent() -agent.add_task(ToolTask(tool=Calculator())) - -# Run the agent with a prompt -agent.run("Give me the answer for 5*4.") +--8<-- "docs/griptape-framework/structures/src/tasks_5.py" ``` ``` @@ -200,7 +173,7 @@ agent.run("Give me the answer for 5*4.") [10/20/23 14:20:29] INFO Subtask a9a9ad7be2bf465fa82bd350116fabe4 Action: { - "name": "Calculator", + "name": "CalculatorTool", "path": "calculate", "input": { "values": { @@ -223,37 +196,7 @@ This Task takes an [Extraction Engine](../../griptape-framework/engines/extracti ### CSV Extraction ```python -from griptape.drivers import OpenAiChatPromptDriver -from griptape.tasks import ExtractionTask -from griptape.structures import Agent -from griptape.engines import CsvExtractionEngine - -# Instantiate the CSV extraction engine -csv_extraction_engine = CsvExtractionEngine( - prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo") -) - -# Define some unstructured data and columns -csv_data = """ -Alice, 28, lives in New York. -Bob, 35 lives in California. -Charlie is 40 and lives in Texas. 
-""" - -columns = ["Name", "Age", "Address"] - - -# Create an agent and add the ExtractionTask to it -agent = Agent() -agent.add_task( - ExtractionTask( - extraction_engine=csv_extraction_engine, - args={"column_names": columns}, - ) -) - -# Run the agent -agent.run(csv_data) +--8<-- "docs/griptape-framework/structures/src/tasks_6.py" ``` ``` [12/19/23 10:33:11] INFO ExtractionTask e87fb457edf8423ab8a78583badd7a11 @@ -272,37 +215,7 @@ agent.run(csv_data) ### JSON Extraction ```python -from schema import Schema - -from griptape.drivers import OpenAiChatPromptDriver -from griptape.tasks import ExtractionTask -from griptape.structures import Agent -from griptape.engines import JsonExtractionEngine - -# Instantiate the json extraction engine -json_extraction_engine = JsonExtractionEngine( - prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo"), -) - -# Define some unstructured data and a schema -json_data = """ -Alice (Age 28) lives in New York. -Bob (Age 35) lives in California. -""" -user_schema = Schema( - {"users": [{"name": str, "age": int, "location": str}]} -).json_schema("UserSchema") - -agent = Agent() -agent.add_task( - ExtractionTask( - extraction_engine=json_extraction_engine, - args={"template_schema": user_schema}, - ) -) - -# Run the agent -agent.run(json_data) +--8<-- "docs/griptape-framework/structures/src/tasks_7.py" ``` ``` [12/19/23 10:37:41] INFO ExtractionTask 3315cc77f94943a2a2dceccfe44f6a67 @@ -321,30 +234,7 @@ To summarize a text, use the [TextSummaryTask](../../reference/griptape/tasks/te This Task takes an [Summarization Engine](../../griptape-framework/engines/summary-engines.md), and a set of arguments to the engine. ```python -from griptape.structures import Agent -from griptape.tasks import TextSummaryTask - -# Create a new agent -agent = Agent() - -# Add the TextSummaryTask to the agent -agent.add_task(TextSummaryTask()) - - -# Run the agent -agent.run( - "Artificial Intelligence (AI) is a branch of computer science that deals with " - "creating machines capable of thinking and learning. It encompasses various fields " - "such as machine learning, neural networks, and deep learning. AI has the potential " - "to revolutionize many sectors, including healthcare, finance, and transportation. " - "Our life in this modern age depends largely on computers. It is almost impossible " - "to think about life without computers. We need computers in everything that we use " - "in our daily lives. So it becomes very important to make computers intelligent so " - "that our lives become easy. Artificial Intelligence is the theory and development " - "of computers, which imitates the human intelligence and senses, such as visual " - "perception, speech recognition, decision-making, and translation between languages." - " Artificial Intelligence has brought a revolution in the world of technology. " -) +--8<-- "docs/griptape-framework/structures/src/tasks_8.py" ``` ``` @@ -386,51 +276,7 @@ To query text, use the [RagTask](../../reference/griptape/tasks/rag_task.md). This task takes a [RAG Engine](../../griptape-framework/engines/rag-engines.md), and a set of arguments specific to the engine. 
```python -from griptape.structures import Agent -from griptape.tasks import RagTask -from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver, OpenAiChatPromptDriver -from griptape.artifacts import TextArtifact -from griptape.engines.rag import RagEngine -from griptape.engines.rag.modules import VectorStoreRetrievalRagModule, PromptResponseRagModule -from griptape.engines.rag.stages import RetrievalRagStage, ResponseRagStage - -# Initialize Embedding Driver and Vector Store Driver -vector_store_driver = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver()) - -artifacts = [ - TextArtifact("Griptape builds AI-powered applications that connect securely to your enterprise data and APIs."), - TextArtifact("Griptape Agents provide incredible power and flexibility when working with large language models.") -] -vector_store_driver.upsert_text_artifacts({"griptape": artifacts}) - -# Instantiate the agent and add RagTask with the RagEngine -agent = Agent() -agent.add_task( - RagTask( - "Respond to the following query: {{ args[0] }}", - rag_engine=RagEngine( - retrieval_stage=RetrievalRagStage( - retrieval_modules=[ - VectorStoreRetrievalRagModule( - vector_store_driver=vector_store_driver, - query_params={ - "namespace": "griptape", - "top_n": 20 - } - ) - ] - ), - response_stage=ResponseRagStage( - response_module=PromptResponseRagModule( - prompt_driver=OpenAiChatPromptDriver(model="gpt-4o") - ) - ) - ), - ) -) - -# Run the agent with a query string -agent.run("Give me information about Griptape") +--8<-- "docs/griptape-framework/structures/src/tasks_9.py" ``` ## Code Execution Task @@ -439,29 +285,7 @@ To execute an arbitrary Python function, use the [CodeExecutionTask](../../refer This task takes a python function, and authors can elect to return a custom artifact. ```python -from griptape.structures import Pipeline -from griptape.tasks import CodeExecutionTask, PromptTask -from griptape.artifacts import BaseArtifact, TextArtifact - - -def character_counter(task: CodeExecutionTask) -> BaseArtifact: - result = len(task.input) - # For functions that don't need to return anything, we recommend returning task.input - return TextArtifact(str(result)) - - -# Instantiate the pipeline -pipeline = Pipeline() - -pipeline.add_tasks( - - # take the first argument from the pipeline `run` method - CodeExecutionTask(run_fn=character_counter), - # # take the output from the previous task and insert it into the prompt - PromptTask("{{args[0]}} using {{ parent_output }} characters") -) - -pipeline.run("Write me a line in a poem") +--8<-- "docs/griptape-framework/structures/src/tasks_10.py" ``` ``` @@ -480,44 +304,14 @@ pipeline.run("Write me a line in a poem") To generate an image, use one of the following [Image Generation Tasks](../../reference/griptape/tasks/index.md). All Image Generation Tasks accept an [Image Generation Engine](../engines/image-generation-engines.md) configured to use an [Image Generation Driver](../drivers/image-generation-drivers.md). -All successful Image Generation Tasks will always output an [Image Artifact](../data/artifacts.md#imageartifact). Each task can be configured to additionally write the generated image to disk by providing either the `output_file` or `output_dir` field. The `output_file` field supports file names in the current directory (`my_image.png`), relative directory prefixes (`images/my_image.png`), or absolute paths (`/usr/var/my_image.png`). 
By setting `output_dir`, the task will generate a file name and place the image in the requested directory. +All successful Image Generation Tasks will always output an [Image Artifact](../data/artifacts.md#image). Each task can be configured to additionally write the generated image to disk by providing either the `output_file` or `output_dir` field. The `output_file` field supports file names in the current directory (`my_image.png`), relative directory prefixes (`images/my_image.png`), or absolute paths (`/usr/var/my_image.png`). By setting `output_dir`, the task will generate a file name and place the image in the requested directory. ### Prompt Image Generation Task The [Prompt Image Generation Task](../../reference/griptape/tasks/prompt_image_generation_task.md) generates an image from a text prompt. ```python -from griptape.engines import PromptImageGenerationEngine -from griptape.drivers import OpenAiImageGenerationDriver -from griptape.tasks import PromptImageGenerationTask -from griptape.structures import Pipeline - - -# Create a driver configured to use OpenAI's DALL-E 3 model. -driver = OpenAiImageGenerationDriver( - model="dall-e-3", - quality="hd", - style="natural", -) - -# Create an engine configured to use the driver. -engine = PromptImageGenerationEngine( - image_generation_driver=driver, -) - -# Instantiate a pipeline. -pipeline = Pipeline() - -# Add a PromptImageGenerationTask to the pipeline. -pipeline.add_tasks( - PromptImageGenerationTask( - input="{{ args[0] }}", - image_generation_engine=engine, - output_dir="images/", - ) -) - -pipeline.run("An image of a mountain on a summer day") +--8<-- "docs/griptape-framework/structures/src/tasks_11.py" ``` ### Variation Image Generation Task @@ -525,42 +319,7 @@ pipeline.run("An image of a mountain on a summer day") The [Variation Image Generation Task](../../reference/griptape/tasks/variation_image_generation_task.md) generates an image using an input image and a text prompt. The input image is used as a basis for generating a new image as requested by the text prompt. ```python -from griptape.engines import VariationImageGenerationEngine -from griptape.drivers import AmazonBedrockImageGenerationDriver, \ - BedrockStableDiffusionImageGenerationModelDriver -from griptape.tasks import VariationImageGenerationTask -from griptape.loaders import ImageLoader -from griptape.structures import Pipeline - - -# Create a driver configured to use Stable Diffusion via Bedrock. -driver = AmazonBedrockImageGenerationDriver( - image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), - model="stability.stable-diffusion-xl-v0", -) - -# Create an engine configured to use the driver. -engine = VariationImageGenerationEngine( - image_generation_driver=driver, -) - -# Load input image artifact. -with open("tests/resources/mountain.png", "rb") as f: - image_artifact = ImageLoader().load(f.read()) - -# Instantiate a pipeline. -pipeline = Pipeline() - -# Add a VariationImageGenerationTask to the pipeline. 
-pipeline.add_task( - VariationImageGenerationTask( - input=("{{ args[0] }}", image_artifact), - image_generation_engine=engine, - output_dir="images/", - ) -) - -pipeline.run("An image of a mountain landscape on a snowy winter day") +--8<-- "docs/griptape-framework/structures/src/tasks_12.py" ``` ### Inpainting Image Generation Task @@ -568,45 +327,7 @@ pipeline.run("An image of a mountain landscape on a snowy winter day") The [Inpainting Image Generation Task](../../reference/griptape/tasks/inpainting_image_generation_task.md) generates an image using an input image, a mask image, and a text prompt. The input image will be modified within the bounds of the mask image as requested by the text prompt. ```python -from griptape.engines import InpaintingImageGenerationEngine -from griptape.drivers import AmazonBedrockImageGenerationDriver, \ - BedrockStableDiffusionImageGenerationModelDriver -from griptape.tasks import InpaintingImageGenerationTask -from griptape.loaders import ImageLoader -from griptape.structures import Pipeline - - -# Create a driver configured to use Stable Diffusion via Bedrock. -driver = AmazonBedrockImageGenerationDriver( - image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), - model="stability.stable-diffusion-xl-v0", -) - -# Create an engine configured to use the driver. -engine = InpaintingImageGenerationEngine( - image_generation_driver=driver, -) - -# Load input image artifacts. -with open("tests/resources/mountain.png", "rb") as f: - image_artifact = ImageLoader().load(f.read()) - -with open("tests/resources/mountain-mask.png", "rb") as f: - mask_artifact = ImageLoader().load(f.read()) - -# Instantiate a pipeline. -pipeline = Pipeline() - -# Add an InpaintingImageGenerationTask to the pipeline. -pipeline.add_task( - InpaintingImageGenerationTask( - input=("{{ args[0] }}", image_artifact, mask_artifact), - image_generation_engine=engine, - output_dir="images/" - ) -) - -pipeline.run("An image of a castle built into the side of a mountain") +--8<-- "docs/griptape-framework/structures/src/tasks_13.py" ``` ### Outpainting Image Generation Task @@ -614,87 +335,17 @@ pipeline.run("An image of a castle built into the side of a mountain") The [Outpainting Image Generation Task](../../reference/griptape/tasks/outpainting_image_generation_task.md) generates an image using an input image, a mask image, and a text prompt. The input image will be modified outside the bounds of a mask image as requested by the text prompt. ```python -from griptape.engines import OutpaintingImageGenerationEngine -from griptape.drivers import AmazonBedrockImageGenerationDriver, \ - BedrockStableDiffusionImageGenerationModelDriver -from griptape.tasks import OutpaintingImageGenerationTask -from griptape.loaders import ImageLoader -from griptape.structures import Pipeline - - -# Create a driver configured to use Stable Diffusion via Bedrock. -driver = AmazonBedrockImageGenerationDriver( - image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), - model="stability.stable-diffusion-xl-v0", -) - -# Create an engine configured to use the driver. -engine = OutpaintingImageGenerationEngine( - image_generation_driver=driver, -) - -# Load input image artifacts. -with open("tests/resources/mountain.png", "rb") as f: - image_artifact = ImageLoader().load(f.read()) - -with open("tests/resources/mountain-mask.png", "rb") as f: - mask_artifact = ImageLoader().load(f.read()) - -# Instantiate a pipeline. 
-pipeline = Pipeline() - -# Add an OutpaintingImageGenerationTask to the pipeline. -pipeline.add_task( - OutpaintingImageGenerationTask( - input=("{{ args[0] }}", image_artifact, mask_artifact), - image_generation_engine=engine, - output_dir="images/", - ) -) - -pipeline.run("An image of a mountain shrouded by clouds") +--8<-- "docs/griptape-framework/structures/src/tasks_14.py" ``` ## Image Query Task The [Image Query Task](../../reference/griptape/tasks/image_query_task.md) performs a natural language query on one or more input images. This Task uses an [Image Query Engine](../engines/image-query-engines.md) configured with an [Image Query Driver](../drivers/image-query-drivers.md) to perform the query. The functionality provided by this Task depend on the capabilities of the model provided by the Driver. -This Task accepts two inputs: a query (represented by either a string or a [Text Artifact](../data/artifacts.md#textartifact)) and a list of [Image Artifacts](../data/artifacts.md#imageartifact) or a Callable returning these two values. +This Task accepts two inputs: a query (represented by either a string or a [Text Artifact](../data/artifacts.md#text)) and a list of [Image Artifacts](../data/artifacts.md#image) or a Callable returning these two values. ```python -from griptape.engines import ImageQueryEngine -from griptape.drivers import OpenAiImageQueryDriver -from griptape.tasks import ImageQueryTask -from griptape.loaders import ImageLoader -from griptape.structures import Pipeline - -# Create a driver configured to use OpenAI's GPT-4 Vision model. -driver = OpenAiImageQueryDriver( - model="gpt-4o", - max_tokens=100, -) - -# Create an engine configured to use the driver. -engine = ImageQueryEngine( - image_query_driver=driver, -) - -# Load the input image artifact. -with open("tests/resources/mountain.png", "rb") as f: - image_artifact = ImageLoader().load(f.read()) - -# Instantiate a pipeline. -pipeline = Pipeline() - -# Add an ImageQueryTask to the pipeline. -pipeline.add_task( - ImageQueryTask( - input=("{{ args[0] }}", [image_artifact]), - image_query_engine=engine, - ) -) - -pipeline.run("Describe the weather in the image") +--8<-- "docs/griptape-framework/structures/src/tasks_15.py" ``` ## Structure Run Task @@ -702,140 +353,7 @@ The [Structure Run Task](../../reference/griptape/tasks/structure_run_task.md) r This Task is useful for orchestrating multiple specialized Structures in a single run. Note that the input to the Task is a tuple of arguments that will be passed to the Structure. ```python -import os - -from griptape.rules import Rule, Ruleset -from griptape.structures import Agent, Pipeline -from griptape.tasks import StructureRunTask -from griptape.drivers import LocalStructureRunDriver, GoogleWebSearchDriver -from griptape.tools import ( - TaskMemoryClient, - WebScraper, - WebSearch, -) - - -def build_researcher(): - researcher = Agent( - tools=[ - WebSearch( - web_search_driver=GoogleWebSearchDriver( - api_key=os.environ["GOOGLE_API_KEY"], - search_id=os.environ["GOOGLE_API_SEARCH_ID"], - ), - ), - WebScraper( - off_prompt=True, - ), - TaskMemoryClient(off_prompt=False), - ], - rulesets=[ - Ruleset( - name="Position", - rules=[ - Rule( - value="Senior Research Analyst", - ) - ], - ), - Ruleset( - name="Objective", - rules=[ - Rule( - value="Uncover cutting-edge developments in AI and data science", - ) - ], - ), - Ruleset( - name="Background", - rules=[ - Rule( - value="""You work at a leading tech think tank., - Your expertise lies in identifying emerging trends. 
- You have a knack for dissecting complex data and presenting actionable insights.""" - ) - ], - ), - Ruleset( - name="Desired Outcome", - rules=[ - Rule( - value="Full analysis report in bullet points", - ) - ], - ), - ], - ) - - return researcher - - -def build_writer(): - writer = Agent( - input="Instructions: {{args[0]}}\nContext: {{args[1]}}", - rulesets=[ - Ruleset( - name="Position", - rules=[ - Rule( - value="Tech Content Strategist", - ) - ], - ), - Ruleset( - name="Objective", - rules=[ - Rule( - value="Craft compelling content on tech advancements", - ) - ], - ), - Ruleset( - name="Backstory", - rules=[ - Rule( - value="""You are a renowned Content Strategist, known for your insightful and engaging articles. - You transform complex concepts into compelling narratives.""" - ) - ], - ), - Ruleset( - name="Desired Outcome", - rules=[ - Rule( - value="Full blog post of at least 4 paragraphs", - ) - ], - ), - ], - ) - - return writer - - -team = Pipeline( - tasks=[ - StructureRunTask( - ( - """Perform a detailed examination of the newest developments in AI as of 2024. - Pinpoint major trends, breakthroughs, and their implications for various industries.""", - ), - driver=LocalStructureRunDriver(structure_factory_fn=build_researcher), - ), - StructureRunTask( - ( - """Utilize the gathered insights to craft a captivating blog - article showcasing the key AI innovations. - Ensure the content is engaging yet straightforward, appealing to a tech-aware readership. - Keep the tone appealing and use simple language to make it less technical.""", - "{{parent_output}}", - ), - driver=LocalStructureRunDriver(structure_factory_fn=build_writer), - ), - ], -) - -team.run() +--8<-- "docs/griptape-framework/structures/src/tasks_16.py" ``` ## Text to Speech Task @@ -843,27 +361,7 @@ team.run() This Task enables Structures to synthesize speech from text using [Text to Speech Engines](../../reference/griptape/engines/audio/text_to_speech_engine.md) and [Text to Speech Drivers](../../reference/griptape/drivers/text_to_speech/index.md). ```python -import os - -from griptape.drivers import ElevenLabsTextToSpeechDriver -from griptape.engines import TextToSpeechEngine -from griptape.tasks import TextToSpeechTask -from griptape.structures import Pipeline - - -driver = ElevenLabsTextToSpeechDriver( - api_key=os.getenv("ELEVEN_LABS_API_KEY"), - model="eleven_multilingual_v2", - voice="Matilda", -) - -task = TextToSpeechTask( - text_to_speech_engine=TextToSpeechEngine( - text_to_speech_driver=driver, - ), -) - -Pipeline(tasks=[task]).run("Generate audio from this text: 'Hello, world!'") +--8<-- "docs/griptape-framework/structures/src/tasks_17.py" ``` ## Audio Transcription Task @@ -871,23 +369,5 @@ Pipeline(tasks=[task]).run("Generate audio from this text: 'Hello, world!'") This Task enables Structures to transcribe speech from text using [Audio Transcription Engines](../../reference/griptape/engines/audio/audio_transcription_engine.md) and [Audio Transcription Drivers](../../reference/griptape/drivers/audio_transcription/index.md). 
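In practice the task transcribes recorded audio rather than text. The snippet file referenced in this hunk is not included in the diff; a minimal sketch, mirroring the inline example being removed below, loads an audio file and hands it to an OpenAI Whisper-backed driver:

```python
from griptape.drivers import OpenAiAudioTranscriptionDriver
from griptape.engines import AudioTranscriptionEngine
from griptape.loaders import AudioLoader
from griptape.structures import Pipeline
from griptape.tasks import AudioTranscriptionTask
from griptape.utils import load_file

# OpenAI's Whisper model performs the actual transcription.
driver = OpenAiAudioTranscriptionDriver(model="whisper-1")

task = AudioTranscriptionTask(
    # Load the audio lazily when the task runs.
    input=lambda _: AudioLoader().load(load_file("tests/resources/sentences2.wav")),
    audio_transcription_engine=AudioTranscriptionEngine(audio_transcription_driver=driver),
)

Pipeline(tasks=[task]).run()
```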
```python -from griptape.drivers import OpenAiAudioTranscriptionDriver -from griptape.engines import AudioTranscriptionEngine -from griptape.loaders import AudioLoader -from griptape.tasks import AudioTranscriptionTask -from griptape.structures import Pipeline -from griptape.utils import load_file - -driver = OpenAiAudioTranscriptionDriver( - model="whisper-1" -) - -task = AudioTranscriptionTask( - input=lambda _: AudioLoader().load(load_file("tests/resources/sentences2.wav")), - audio_transcription_engine=AudioTranscriptionEngine( - audio_transcription_driver=driver, - ), -) - -Pipeline(tasks=[task]).run() +--8<-- "docs/griptape-framework/structures/src/tasks_18.py" ``` diff --git a/docs/griptape-framework/structures/workflows.md b/docs/griptape-framework/structures/workflows.md index 75511171f..9161268ae 100644 --- a/docs/griptape-framework/structures/workflows.md +++ b/docs/griptape-framework/structures/workflows.md @@ -7,7 +7,7 @@ search: A [Workflow](../../reference/griptape/structures/workflow.md) is a non-sequential DAG that can be used for complex concurrent scenarios with tasks having multiple inputs. -You can access the final output of the Workflow by using the [output](../../reference/griptape/structures/agent.md#griptape.structures.structure.Structure.output) attribute. +You can access the final output of the Workflow by using the [output](../../reference/griptape/structures/structure.md#griptape.structures.structure.Structure.output) attribute. ## Context @@ -22,43 +22,7 @@ Workflows have access to the following [context](../../reference/griptape/struct Let's build a simple workflow. Let's say, we want to write a story in a fantasy world with some unique characters. We could setup a workflow that generates a world based on some keywords. Then we pass the world description to any number of child tasks that create characters. Finally, the last task pulls in information from all parent tasks and writes up a short story. ```python -from griptape.tasks import PromptTask -from griptape.structures import Workflow -from griptape.utils import StructureVisualizer - - -world_task = PromptTask( - "Create a fictional world based on the following key words {{ keywords|join(', ') }}", - context={ - "keywords": ["fantasy", "ocean", "tidal lock"] - }, - id="world" -) - -def character_task(task_id, character_name) -> PromptTask: - return PromptTask( - "Based on the following world description create a character named {{ name }}:\n{{ parent_outputs['world'] }}", - context={ - "name": character_name - }, - id=task_id, - parent_ids=["world"] - ) - -scotty_task = character_task("scotty", "Scotty") -annie_task = character_task("annie", "Annie") - -story_task = PromptTask( - "Based on the following description of the world and characters, write a short story:\n{{ parent_outputs['world'] }}\n{{ parent_outputs['scotty'] }}\n{{ parent_outputs['annie'] }}", - id="story", - parent_ids=["world", "scotty", "annie"] -) - -workflow = Workflow(tasks=[world_task, story_task, scotty_task, annie_task, story_task]) - -print(StructureVisualizer(workflow).to_url()) - -workflow.run() +--8<-- "docs/griptape-framework/structures/src/workflows_1.py" ``` Note that we use the `StructureVisualizer` to get a visual representation of the workflow. 
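The example now lives in a snippet file that is not included in this diff. A condensed sketch of the same workflow, based on the inline version being removed below, looks roughly like this:

```python
from griptape.structures import Workflow
from griptape.tasks import PromptTask
from griptape.utils import StructureVisualizer

# Root task: generate the world description.
world_task = PromptTask(
    "Create a fictional world based on the following key words {{ keywords|join(', ') }}",
    context={"keywords": ["fantasy", "ocean", "tidal lock"]},
    id="world",
)


def character_task(task_id: str, character_name: str) -> PromptTask:
    # Each character task depends on the world task's output.
    return PromptTask(
        "Based on the following world description create a character named {{ name }}:\n{{ parent_outputs['world'] }}",
        context={"name": character_name},
        id=task_id,
        parent_ids=["world"],
    )


scotty_task = character_task("scotty", "Scotty")
annie_task = character_task("annie", "Annie")

# Final task: pull in the world and both characters.
story_task = PromptTask(
    "Based on the following description of the world and characters, write a short story:\n"
    "{{ parent_outputs['world'] }}\n{{ parent_outputs['scotty'] }}\n{{ parent_outputs['annie'] }}",
    id="story",
    parent_ids=["world", "scotty", "annie"],
)

workflow = Workflow(tasks=[world_task, scotty_task, annie_task, story_task])

# Print a URL with a rendered graph of the workflow before running it.
print(StructureVisualizer(workflow).to_url())

workflow.run()
```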
If we visit the printed url, it should look like this: @@ -164,146 +128,43 @@ The above example showed how to create a workflow using the declarative syntax v Declaratively specify parents (same as above example): ```python -from griptape.tasks import PromptTask -from griptape.structures import Workflow -from griptape.rules import Rule - -workflow = Workflow( - tasks=[ - PromptTask("Name an animal", id="animal"), - PromptTask("Describe {{ parent_outputs['animal'] }} with an adjective", id="adjective", parent_ids=["animal"]), - PromptTask("Name a {{ parent_outputs['adjective'] }} animal", id="new-animal", parent_ids=["adjective"]), - ], - rules=[Rule("output a single lowercase word")] -) - -workflow.run() +--8<-- "docs/griptape-framework/structures/src/workflows_2.py" ``` Declaratively specify children: ```python -from griptape.tasks import PromptTask -from griptape.structures import Workflow -from griptape.rules import Rule - -workflow = Workflow( - tasks=[ - PromptTask("Name an animal", id="animal", child_ids=["adjective"]), - PromptTask("Describe {{ parent_outputs['animal'] }} with an adjective", id="adjective", child_ids=["new-animal"]), - PromptTask("Name a {{ parent_outputs['adjective'] }} animal", id="new-animal"), - ], - rules=[Rule("output a single lowercase word")], -) - -workflow.run() +--8<-- "docs/griptape-framework/structures/src/workflows_3.py" ``` Declaratively specifying a mix of parents and children: ```python -from griptape.tasks import PromptTask -from griptape.structures import Workflow -from griptape.rules import Rule - -workflow = Workflow( - tasks=[ - PromptTask("Name an animal", id="animal"), - PromptTask("Describe {{ parent_outputs['animal'] }} with an adjective", id="adjective", parent_ids=["animal"], child_ids=["new-animal"]), - PromptTask("Name a {{ parent_outputs['adjective'] }} animal", id="new-animal"), - ], - rules=[Rule("output a single lowercase word")], -) - -workflow.run() +--8<-- "docs/griptape-framework/structures/src/workflows_4.py" ``` Imperatively specify parents: ```python -from griptape.tasks import PromptTask -from griptape.structures import Workflow -from griptape.rules import Rule - -animal_task = PromptTask("Name an animal", id="animal") -adjective_task = PromptTask("Describe {{ parent_outputs['animal'] }} with an adjective", id="adjective") -new_animal_task = PromptTask("Name a {{ parent_outputs['adjective'] }} animal", id="new-animal") - -adjective_task.add_parent(animal_task) -new_animal_task.add_parent(adjective_task) - -workflow = Workflow( - tasks=[animal_task, adjective_task, new_animal_task], - rules=[Rule("output a single lowercase word")], -) - -workflow.run() +--8<-- "docs/griptape-framework/structures/src/workflows_5.py" ``` Imperatively specify children: ```python -from griptape.tasks import PromptTask -from griptape.structures import Workflow -from griptape.rules import Rule - -animal_task = PromptTask("Name an animal", id="animal") -adjective_task = PromptTask("Describe {{ parent_outputs['animal'] }} with an adjective", id="adjective") -new_animal_task = PromptTask("Name a {{ parent_outputs['adjective'] }} animal", id="new-animal") - -animal_task.add_child(adjective_task) -adjective_task.add_child(new_animal_task) - -workflow = Workflow( - tasks=[animal_task, adjective_task, new_animal_task], - rules=[Rule("output a single lowercase word")], -) - -workflow.run() +--8<-- "docs/griptape-framework/structures/src/workflows_6.py" ``` Imperatively specify a mix of parents and children: ```python -from griptape.tasks import PromptTask 
-from griptape.structures import Workflow -from griptape.rules import Rule - -animal_task = PromptTask("Name an animal", id="animal") -adjective_task = PromptTask("Describe {{ parent_outputs['animal'] }} with an adjective", id="adjective") -new_animal_task = PromptTask("Name a {{ parent_outputs['adjective'] }} animal", id="new-animal") - -adjective_task.add_parent(animal_task) -adjective_task.add_child(new_animal_task) - -workflow = Workflow( - tasks=[animal_task, adjective_task, new_animal_task], - rules=[Rule("output a single lowercase word")], -) - -workflow.run() +--8<-- "docs/griptape-framework/structures/src/workflows_7.py" ``` Or even mix imperative and declarative: ```python -from griptape.tasks import PromptTask -from griptape.structures import Workflow -from griptape.rules import Rule - -animal_task = PromptTask("Name an animal", id="animal") -adjective_task = PromptTask("Describe {{ parent_outputs['animal'] }} with an adjective", id="adjective", parent_ids=["animal"]) - - -new_animal_task = PromptTask("Name a {{ parent_outputs['adjective'] }} animal", id="new-animal") -new_animal_task.add_parent(adjective_task) - -workflow = Workflow( - tasks=[animal_task, adjective_task, new_animal_task], - rules=[Rule("output a single lowercase word")], -) - -workflow.run() +--8<-- "docs/griptape-framework/structures/src/workflows_8.py" ``` ### Insert Parallel Tasks @@ -316,29 +177,7 @@ workflow.run() Imperatively insert parallel tasks between a parent and child: ```python -from griptape.tasks import PromptTask -from griptape.structures import Workflow -from griptape.rules import Rule - -workflow = Workflow( - rules=[Rule("output a single lowercase word")], -) - -animal_task = PromptTask("Name an animal", id="animal") -adjective_task = PromptTask("Describe {{ parent_outputs['animal'] }} with an adjective", id="adjective") -color_task = PromptTask("Describe {{ parent_outputs['animal'] }} with a color", id="color") -new_animal_task = PromptTask("Name an animal described as: \n{{ parents_output_text }}", id="new-animal") - -# The following workflow runs animal_task, then (adjective_task, and color_task) -# in parallel, then finally new_animal_task. -# -# In other words, the output of animal_task is passed to both adjective_task and color_task -# and the outputs of adjective_task and color_task are then passed to new_animal_task. -workflow.add_task(animal_task) -workflow.add_task(new_animal_task) -workflow.insert_tasks(animal_task, [adjective_task, color_task], new_animal_task) - -workflow.run() +--8<-- "docs/griptape-framework/structures/src/workflows_9.py" ``` output: @@ -362,3 +201,31 @@ output: [06/18/24 09:52:23] INFO PromptTask new-animal Output: elephant ``` + +### Bitshift Composition + +Task relationships can also be set up with the Python bitshift operators `>>` and `<<`. The following four statements are all functionally equivalent: + +```python +task1 >> task2 +task1.add_child(task2) + +task2 << task1 +task2.add_parent(task1) +``` + +When using the bitshift to compose operators, the relationship is set in the direction that the bitshift operator points. +For example, `task1 >> task2` means that `task1` runs first and `task2` runs second. +Multiple operators can be composed – keep in mind the chain is executed left-to-right and the rightmost object is always returned. 
For example: + +```python +task1 >> task2 >> task3 << task4 +``` + +is equivalent to: + +```python +task1.add_child(task2) +task2.add_child(task3) +task3.add_parent(task4) +``` diff --git a/docs/griptape-framework/tools/index.md b/docs/griptape-framework/tools/index.md index 15be7be3c..d97a9347c 100644 --- a/docs/griptape-framework/tools/index.md +++ b/docs/griptape-framework/tools/index.md @@ -15,55 +15,76 @@ You can switch between the two strategies by setting `use_native_tools` to `True Here is an example of a Pipeline using Tools: ```python -from griptape.tasks import ToolkitTask -from griptape.structures import Pipeline -from griptape.tools import WebScraper, FileManager, TaskMemoryClient - - -pipeline = Pipeline() - -pipeline.add_tasks( - ToolkitTask( - "Load https://www.griptape.ai, summarize it, and store it in a file called griptape.txt", - tools=[WebScraper(off_prompt=True), FileManager(off_prompt=True), TaskMemoryClient(off_prompt=False)] - ), -) - -pipeline.run() +--8<-- "docs/griptape-framework/tools/src/index_1.py" ``` ``` -[09/08/23 10:53:56] INFO ToolkitTask 979d99f68766423ea05b367e951281bc - Input: Load https://www.griptape.ai, summarize it, and store it in a file called griptape.txt -[09/08/23 10:54:02] INFO Subtask 97bd154a71e14a1699f8152e50490a71 - Thought: The first step is to load the content of the webpage. I can use the WebScraper tool with the get_content - activity for this. - - Action: {"name": "WebScraper", "path": "get_content", "input": {"values": {"url": - "https://www.griptape.ai"}}} -[09/08/23 10:54:03] INFO Subtask 97bd154a71e14a1699f8152e50490a71 - Response: Output of "WebScraper.get_content" was stored in memory with memory_name "TaskMemory" and - artifact_namespace "9eb6f5828cf64356bf323f11d28be27e" -[09/08/23 10:54:09] INFO Subtask 7ee08458ce154e3d970711b7d3ed79ba - Thought: Now that the webpage content is stored in memory, I can use the TaskMemory tool with the summarize - activity to summarize the content. - Action: {"name": "TaskMemoryClient", "path": "summarize", "input": {"values": {"memory_name": "TaskMemory", "artifact_namespace": "9eb6f5828cf64356bf323f11d28be27e"}}} -[09/08/23 10:54:12] INFO Subtask 7ee08458ce154e3d970711b7d3ed79ba - Response: Griptape is an open source framework that allows developers to build and deploy AI applications - using large language models (LLMs). It provides the ability to create conversational and event-driven apps that - can access and manipulate data securely. Griptape enforces structures like sequential pipelines and workflows for - predictability, while also allowing for creativity by safely prompting LLMs with external APIs and data stores. - The framework can be used to create AI systems that operate across both predictability and creativity dimensions. - Griptape Cloud is a managed platform for deploying and managing AI apps. -[09/08/23 10:54:24] INFO Subtask a024949a9a134f058f2e6b7c379c8713 - Thought: Now that I have the summary, I can store it in a file called griptape.txt. I can use the FileManager - tool with the save_file_to_disk activity for this. 
- Action: {"name": "FileManager", "path": "save_file_to_disk", "input": {"values": - {"memory_name": "TaskMemory", "artifact_namespace": "9eb6f5828cf64356bf323f11d28be27e", "path": - "griptape.txt"}}} - INFO Subtask a024949a9a134f058f2e6b7c379c8713 - Response: saved successfully -[09/08/23 10:54:27] INFO ToolkitTask 979d99f68766423ea05b367e951281bc - Output: The summary of the webpage https://www.griptape.ai has been successfully stored in a file called - griptape.txt. +[08/12/24 15:18:19] INFO ToolkitTask 48ac0486e5374e1ea53e8d2b955e511f + Input: Load https://www.griptape.ai, summarize it, and store it in griptape.txt +[08/12/24 15:18:20] INFO Subtask 3b8365c077ae4a7e94087bfeff7a858c + Actions: [ + { + "tag": "call_P6vaURTXfiYBJZolTkUSRHRc", + "name": "WebScraperTool", + "path": "get_content", + "input": { + "values": { + "url": "https://www.griptape.ai" + } + } + } + ] + INFO Subtask 3b8365c077ae4a7e94087bfeff7a858c + Response: Output of "WebScraperTool.get_content" was stored in memory with memory_name "TaskMemory" and artifact_namespace + "301e546f4450489ea4680645297092a2" +[08/12/24 15:18:21] INFO Subtask 930e9ca52e4140a48cce1e47368d45be + Actions: [ + { + "tag": "call_0VOTEvinRer7rG4oEirBYcow", + "name": "PromptSummaryTool", + "path": "summarize", + "input": { + "values": { + "summary": { + "memory_name": "TaskMemory", + "artifact_namespace": "301e546f4450489ea4680645297092a2" + } + } + } + } + ] +[08/12/24 15:18:24] INFO Subtask 930e9ca52e4140a48cce1e47368d45be + Response: Griptape offers a comprehensive solution for building, deploying, and scaling AI applications in the cloud. It provides developers + with a framework and cloud services to create retrieval-driven AI-powered applications without needing extensive knowledge of AI or prompt + engineering. The Griptape Framework allows developers to build business logic using Python, ensuring better security, performance, and + cost-efficiency. Griptape Cloud handles infrastructure management, enabling seamless deployment and scaling of applications. Key features + include automated data preparation (ETL), retrieval as a service (RAG), and a structure runtime (RUN) for building AI agents, pipelines, and + workflows. Griptape also offers solutions for custom projects, turnkey SaaS offerings, and finished applications. +[08/12/24 15:18:27] INFO Subtask d0f22504f576401f8d7e8ea78270a376 + Actions: [ + { + "tag": "call_zdUe2vdR0DCfR6LKcxjI6ayb", + "name": "FileManagerTool", + "path": "save_content_to_file", + "input": { + "values": { + "path": "griptape.txt", + "content": "Griptape offers a comprehensive solution for building, deploying, and scaling AI applications in the cloud. It provides + developers with a framework and cloud services to create retrieval-driven AI-powered applications without needing extensive knowledge of AI or + prompt engineering. The Griptape Framework allows developers to build business logic using Python, ensuring better security, performance, and + cost-efficiency. Griptape Cloud handles infrastructure management, enabling seamless deployment and scaling of applications. Key features + include automated data preparation (ETL), retrieval as a service (RAG), and a structure runtime (RUN) for building AI agents, pipelines, and + workflows. Griptape also offers solutions for custom projects, turnkey SaaS offerings, and finished applications." 
+ } + } + } + ] + INFO Subtask d0f22504f576401f8d7e8ea78270a376 + Response: Successfully saved file +[08/12/24 15:18:28] INFO ToolkitTask 48ac0486e5374e1ea53e8d2b955e511f + Output: The content from https://www.griptape.ai has been summarized and stored in griptape.txt. + INFO PromptTask 4a9c59b1c06d4c549373d243a12f1285 + Input: Say the following in spanish: The content from https://www.griptape.ai has been summarized and stored in griptape.txt. + INFO PromptTask 4a9c59b1c06d4c549373d243a12f1285 + Output: El contenido de https://www.griptape.ai ha sido resumido y almacenado en griptape.txt. ``` diff --git a/griptape/tools/griptape_cloud_knowledge_base_client/__init__.py b/docs/griptape-framework/tools/src/__init__.py similarity index 100% rename from griptape/tools/griptape_cloud_knowledge_base_client/__init__.py rename to docs/griptape-framework/tools/src/__init__.py diff --git a/docs/griptape-framework/tools/src/index_1.py b/docs/griptape-framework/tools/src/index_1.py new file mode 100644 index 000000000..a894e2037 --- /dev/null +++ b/docs/griptape-framework/tools/src/index_1.py @@ -0,0 +1,18 @@ +from griptape.structures import Pipeline +from griptape.tasks import ToolkitTask +from griptape.tools import FileManagerTool, PromptSummaryTool, WebScraperTool + +pipeline = Pipeline() + +pipeline.add_tasks( + ToolkitTask( + "Load https://www.griptape.ai, summarize it, and store it in a file called griptape.txt", + tools=[ + WebScraperTool(off_prompt=True), + FileManagerTool(off_prompt=True), + PromptSummaryTool(off_prompt=False), + ], + ), +) + +pipeline.run() diff --git a/docs/griptape-tools/custom-tools/index.md b/docs/griptape-tools/custom-tools/index.md index 5b0fe7ad1..0cb248cf4 100644 --- a/docs/griptape-tools/custom-tools/index.md +++ b/docs/griptape-tools/custom-tools/index.md @@ -30,48 +30,16 @@ To add Python dependencies for your tool, add a `requirements.txt` file. The too Next, create a `tool.py` file with the following code: -```python title="PYTEST_IGNORE" -import random -from griptape.artifacts import TextArtifact -from griptape.tools import BaseTool -from griptape.utils.decorators import activity -from schema import Schema, Literal, Optional - - -class RandomNumberGenerator(BaseTool): - @activity(config={ - "description": "Can be used to generate random numbers", - "schema": Schema({ - Optional(Literal( - "decimals", - description="Number of decimals to round the random number to" - )): int - }) - }) - def generate(self, params: dict) -> TextArtifact: - return TextArtifact( - str(round(random.random(), params["values"].get("decimals"))) - ) +```python +--8<-- "docs/griptape-tools/custom-tools/src/index_1.py" ``` ## Testing Custom Tools Finally, let's test our tool: -```python title="PYTEST_IGNORE" -from griptape.structures import Agent -from rng_tool.tool import RandomNumberGenerator - -rng_tool = RandomNumberGenerator() - -agent = Agent( - tools=[rng_tool] -) - -agent.run( - "generate a random number rounded to 5 decimal places" -) - +```python +--8<-- "docs/griptape-tools/custom-tools/src/index_2.py" ``` That's it! You can start using this tool with any converter or directly via Griptape. 
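For a quick check outside of any Structure, an activity can also be called directly as a plain method, passing the input under a "values" key. A minimal sketch, assuming the same `rng_tool` package layout used in the example above:

```python
from rng_tool.tool import RandomNumberGenerator

rng_tool = RandomNumberGenerator()

# Activities receive their input as a dict with the schema values under "values".
result = rng_tool.generate({"values": {"decimals": 2}})

print(result.value)  # e.g. "0.57"
```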
diff --git a/docs/griptape-tools/custom-tools/src/index_1.py b/docs/griptape-tools/custom-tools/src/index_1.py new file mode 100644 index 000000000..2ffe2895c --- /dev/null +++ b/docs/griptape-tools/custom-tools/src/index_1.py @@ -0,0 +1,20 @@ +import random + +from schema import Literal, Optional, Schema + +from griptape.artifacts import TextArtifact +from griptape.tools import BaseTool +from griptape.utils.decorators import activity + + +class RandomNumberGenerator(BaseTool): + @activity( + config={ + "description": "Can be used to generate random numbers", + "schema": Schema( + {Optional(Literal("decimals", description="Number of decimals to round the random number to")): int} + ), + } + ) + def generate(self, params: dict) -> TextArtifact: + return TextArtifact(str(round(random.random(), params["values"].get("decimals")))) diff --git a/docs/griptape-tools/custom-tools/src/index_2.py b/docs/griptape-tools/custom-tools/src/index_2.py new file mode 100644 index 000000000..727b649dd --- /dev/null +++ b/docs/griptape-tools/custom-tools/src/index_2.py @@ -0,0 +1,28 @@ +import random + +from schema import Literal, Optional, Schema + +from griptape.artifacts import TextArtifact +from griptape.structures import Agent +from griptape.tools import BaseTool +from griptape.utils.decorators import activity + + +class RandomNumberGenerator(BaseTool): + @activity( + config={ + "description": "Can be used to generate random numbers", + "schema": Schema( + {Optional(Literal("decimals", description="Number of decimals to round the random number to")): int} + ), + } + ) + def generate(self, params: dict) -> TextArtifact: + return TextArtifact(str(round(random.random(), params["values"].get("decimals")))) + + +rng_tool = RandomNumberGenerator() + +agent = Agent(tools=[rng_tool]) + +agent.run("generate a random number rounded to 5 decimal places") diff --git a/docs/griptape-tools/index.md b/docs/griptape-tools/index.md index 869cde41e..f483d493f 100644 --- a/docs/griptape-tools/index.md +++ b/docs/griptape-tools/index.md @@ -2,36 +2,12 @@ Tools give the LLM abilities to invoke outside APIs, reference data sets, and ge Griptape tools are special Python classes that LLMs can use to accomplish specific goals. Here is an example custom tool for generating a random number: -```python title="PYTEST_IGNORE" -import random -from griptape.artifacts import TextArtifact -from griptape.tools import BaseTool -from griptape.utils.decorators import activity -from schema import Schema, Literal, Optional - - -class RandomNumberGenerator(BaseTool): - @activity(config={ - "description": "Can be used to generate random numbers", - "schema": Schema({ - Optional(Literal( - "decimals", - description="Number of decimals to round the random number to" - )): int - }) - }) - def generate(self, params: dict) -> TextArtifact: - return TextArtifact( - str(round(random.random(), params["values"].get("decimals"))) - ) +```python +--8<-- "docs/griptape-tools/src/index_1.py" ``` A tool can have many "activities" as denoted by the `@activity` decorator. Each activity has a description (used to provide context to the LLM), and the input schema that the LLM must follow in order to use the tool. Output artifacts from all tool activities (except for `InfoArtifact` and `ErrorArtifact`) go to short-term `TaskMemory`. To disable that behavior set the `off_prompt` tool parameter to `False`: -```python title="PYTEST_IGNORE" -RandomNumberGenerator() -``` - We provide a set of official Griptape Tools for accessing and processing data. 
You can also [build your own tools](./custom-tools/index.md). diff --git a/docs/griptape-tools/official-tools/audio-transcription-client.md b/docs/griptape-tools/official-tools/audio-transcription-client.md deleted file mode 100644 index 5cb458d76..000000000 --- a/docs/griptape-tools/official-tools/audio-transcription-client.md +++ /dev/null @@ -1,24 +0,0 @@ -# AudioTranscriptionClient - -This Tool enables [Agents](../../griptape-framework/structures/agents.md) to transcribe speech from text using [Audio Transcription Engines](../../reference/griptape/engines/audio/audio_transcription_engine.md) and [Audio Transcription Drivers](../../reference/griptape/drivers/audio_transcription/index.md). - -```python -from griptape.drivers import OpenAiAudioTranscriptionDriver -from griptape.engines import AudioTranscriptionEngine -from griptape.tools.audio_transcription_client.tool import AudioTranscriptionClient -from griptape.structures import Agent - - -driver = OpenAiAudioTranscriptionDriver( - model="whisper-1" -) - -tool = AudioTranscriptionClient( - off_prompt=False, - engine=AudioTranscriptionEngine( - audio_transcription_driver=driver, - ), -) - -Agent(tools=[tool]).run("Transcribe the following audio file: /Users/andrew/code/griptape/tests/resources/sentences2.wav") -``` \ No newline at end of file diff --git a/docs/griptape-tools/official-tools/audio-transcription-tool.md b/docs/griptape-tools/official-tools/audio-transcription-tool.md new file mode 100644 index 000000000..ad8eeaa9b --- /dev/null +++ b/docs/griptape-tools/official-tools/audio-transcription-tool.md @@ -0,0 +1,7 @@ +# Audio Transcription Tool + +This Tool enables [Agents](../../griptape-framework/structures/agents.md) to transcribe speech from text using [Audio Transcription Engines](../../reference/griptape/engines/audio/audio_transcription_engine.md) and [Audio Transcription Drivers](../../reference/griptape/drivers/audio_transcription/index.md). + +```python +--8<-- "docs/griptape-tools/official-tools/src/audio_transcription_tool_1.py" +``` diff --git a/docs/griptape-tools/official-tools/aws-iam-client.md b/docs/griptape-tools/official-tools/aws-iam-client.md deleted file mode 100644 index ac831d492..000000000 --- a/docs/griptape-tools/official-tools/aws-iam-client.md +++ /dev/null @@ -1,68 +0,0 @@ -# AwsIamClient - -This tool enables LLMs to make AWS IAM API requests. - -```python -import boto3 -from griptape.structures import Agent -from griptape.tools import AwsIamClient - -# Initialize the AWS IAM client -aws_iam_client = AwsIamClient(session=boto3.Session()) - -# Create an agent with the AWS IAM client tool -agent = Agent( - tools=[aws_iam_client] -) - -# Run the agent with a high-level task -agent.run("List all my IAM users") -``` -``` -[09/11/23 16:45:45] INFO Task 890fcf77fb074c9490d5c91563e0c995 - Input: List all my IAM users -[09/11/23 16:45:51] INFO Subtask f2f0809ee10d4538972ed01fdd6a2fb8 - Thought: To list all IAM users, I can use the - AwsIamClient tool with the list_users activity. - This activity does not require any input. - - Action: {"name": "AwsIamClient", - "path": "list_users"} -[09/11/23 16:45:52] INFO Subtask f2f0809ee10d4538972ed01fdd6a2fb8 - Response: Output of "AwsIamClient.list_users" - was stored in memory with memory_name - "TaskMemory" and artifact_namespace - "51d22a018a434904a5da3bb8d4f763f7" -[09/11/23 16:45:59] INFO Subtask 8e0e918571544eeebf46de898466c48c - Thought: The output of the list_users activity is - stored in memory. 
I can retrieve this information - using the TaskMemory tool with the summarize - activity. - Action: {"name": "TaskMemoryClient", "path": - "summarize", "input": {"values": {"memory_name": - "TaskMemory", "artifact_namespace": - "51d22a018a434904a5da3bb8d4f763f7"}}} -[09/11/23 16:46:03] INFO Subtask 8e0e918571544eeebf46de898466c48c - Response: The text provides information about - two different users in an AWS IAM system. The first - user is named "example-user-1" and has a - user ID of "AIDASHBEHWJLQV2IOYDHM". The second user - is named "example-user-2" and - has a user ID of "AIDASHBEHWJLWHVS76C6X". Both - users have a path of "/", and their ARNs (Amazon - Resource Names) indicate their location in the IAM - system. The first user was created on July 18, - 2023, at 20:29:27 UTC, while the second user was - created on August 29, 2023, at 20:56:37 UTC. -[09/11/23 16:46:13] INFO Task 890fcf77fb074c9490d5c91563e0c995 - Output: There are two IAM users in your AWS - account: - - 1. User "example-user-1" with user ID - "AIDASHBEHWJLQV2IOYDHM", created on July 18, 2023, - at 20:29:27 UTC. - 2. User "example-user-2" with - user ID "AIDASHBEHWJLWHVS76C6X", created on August - 29, 2023, at 20:56:37 UTC. - -``` diff --git a/docs/griptape-tools/official-tools/aws-iam-tool.md b/docs/griptape-tools/official-tools/aws-iam-tool.md new file mode 100644 index 000000000..8be54afb5 --- /dev/null +++ b/docs/griptape-tools/official-tools/aws-iam-tool.md @@ -0,0 +1,64 @@ +# Aws Iam Tool + +This tool enables LLMs to make AWS IAM API requests. + +```python +--8<-- "docs/griptape-tools/official-tools/src/aws_iam_tool_1.py" +``` +``` +[08/12/24 14:56:59] INFO ToolkitTask 12345abcd67890efghijk1112131415 + Input: List all my IAM users +[08/12/24 14:57:00] INFO Subtask 54321dcba09876fedcba1234567890ab + Actions: [ + { + "tag": "call_OxhQ9ITNIFq0WjkSnOCYAx8h", + "name": "AwsIamClient", + "path": "list_users", + "input": { + "values": {} + } + } + ] + INFO Subtask 54321dcba09876fedcba1234567890ab + Response: {'Path': '/', 'UserName': 'dummy-user-1', 'UserId': 'AIDAAAAAA1111AAAAAA1111', 'Arn': + 'arn:aws:iam::123456789012:user/dummy-user-1', 'CreateDate': datetime.datetime(2024, 8, 7, 15, 8, 7, tzinfo=tzutc())} + + {'Path': '/', 'UserName': 'dummy-user-2', 'UserId': 'AIDBBBBBB2222BBBBBB2222', 'Arn': + 'arn:aws:iam::123456789012:user/dummy-user-2', 'CreateDate': datetime.datetime(2023, 7, 18, 20, 29, 27, tzinfo=tzutc())} + + {'Path': '/', 'UserName': 'dummy-user-3', 'UserId': 'AIDCCCCCC3333CCCCCC3333', 'Arn': + 'arn:aws:iam::123456789012:user/dummy-user-3', 'CreateDate': datetime.datetime(2024, 7, 15, 19, 39, 41, tzinfo=tzutc())} + + {'Path': '/', 'UserName': 'dummy-user-4', 'UserId': 'AIDDDDDDD4444DDDDDD4444', 'Arn': + 'arn:aws:iam::123456789012:user/dummy-user-4', 'CreateDate': datetime.datetime(2024, 8, 2, 19, 28, 31, tzinfo=tzutc())} + + {'Path': '/', 'UserName': 'dummy-user-5', 'UserId': 'AIDEEEEE5555EEEEE5555', 'Arn': + 'arn:aws:iam::123456789012:user/dummy-user-5', 'CreateDate': datetime.datetime(2023, 8, 29, 20, 56, 37, tzinfo=tzutc())} +[08/12/24 14:57:08] INFO ToolkitTask 12345abcd67890efghijk1112131415 + Output: Here are all your IAM users: + + 1. **Username:** dummy-user-1 + - **UserId:** AIDAAAAAA1111AAAAAA1111 + - **Arn:** arn:aws:iam::123456789012:user/dummy-user-1 + - **CreateDate:** 2024-08-07 + + 2. **Username:** dummy-user-2 + - **UserId:** AIDBBBBBB2222BBBBBB2222 + - **Arn:** arn:aws:iam::123456789012:user/dummy-user-2 + - **CreateDate:** 2023-07-18 + + 3. 
**Username:** dummy-user-3 + - **UserId:** AIDCCCCCC3333CCCCCC3333 + - **Arn:** arn:aws:iam::123456789012:user/dummy-user-3 + - **CreateDate:** 2024-07-15 + + 4. **Username:** dummy-user-4 + - **UserId:** AIDDDDDDD4444DDDDDD4444 + - **Arn:** arn:aws:iam::123456789012:user/dummy-user-4 + - **CreateDate:** 2024-08-02 + + 5. **Username:** dummy-user-5 + - **UserId:** AIDEEEEE5555EEEEE5555 + - **Arn:** arn:aws:iam::123456789012:user/dummy-user-5 + - **CreateDate:** 2023-08-29 +``` diff --git a/docs/griptape-tools/official-tools/aws-s3-client.md b/docs/griptape-tools/official-tools/aws-s3-client.md deleted file mode 100644 index d7406a987..000000000 --- a/docs/griptape-tools/official-tools/aws-s3-client.md +++ /dev/null @@ -1,66 +0,0 @@ -# AwsS3Client - -This tool enables LLMs to make AWS S3 API requests. - -```python -import boto3 -from griptape.structures import Agent -from griptape.tools import AwsS3Client, TaskMemoryClient - -# Initialize the AWS S3 client -aws_s3_client = AwsS3Client( - session=boto3.Session(), - off_prompt=True -) - -# Create an agent with the AWS S3 client tool -agent = Agent( - tools=[aws_s3_client, TaskMemoryClient(off_prompt=False)] -) - -# Task to list all the AWS S3 buckets -agent.run("List all my S3 buckets.") -``` -``` -[09/11/23 16:49:35] INFO Task 8bf7538e217a4b5a8472829f5eee75b9 - Input: List all my S3 buckets. -[09/11/23 16:49:41] INFO Subtask 9fc44f5c8e73447ba737283cb2ef7f5d - Thought: To list all S3 buckets, I can use the - "list_s3_buckets" activity of the "AwsS3Client" - tool. This activity doesn't require any input. - - Action: {"name": "AwsS3Client", - "path": "list_s3_buckets"} -[09/11/23 16:49:42] INFO Subtask 9fc44f5c8e73447ba737283cb2ef7f5d - Response: Output of - "AwsS3Client.list_s3_buckets" was stored in memory - with memory_name "TaskMemory" and - artifact_namespace - "f2592085fd4a430286a46770ea508cc9" -[09/11/23 16:49:50] INFO Subtask 0e9bb639a432431a92ef40a8c085ca0f - Thought: The output of the "list_s3_buckets" - activity is stored in memory. I can retrieve this - information using the "summarize" activity of the - "TaskMemory" tool. - Action: {"name": "TaskMemoryClient", "path": - "summarize", "input": {"values": {"memory_name": - "TaskMemory", "artifact_namespace": - "f2592085fd4a430286a46770ea508cc9"}}} -[09/11/23 16:49:52] INFO Subtask 0e9bb639a432431a92ef40a8c085ca0f - Response: The text consists of multiple - dictionaries, each containing a 'Name' and - 'CreationDate' key-value pair. The 'Name' - represents the name of a resource or bucket, while - the 'CreationDate' represents the date and time - when the resource or bucket was created. -[09/11/23 16:50:03] INFO Task 8bf7538e217a4b5a8472829f5eee75b9 - Output: The names of your S3 buckets are as - follows: - 1. Bucket Name: 'example-bucket-1', Creation Date: - '2022-01-01T00:00:00Z' - 2. Bucket Name: 'example-bucket-2', Creation Date: - '2022-01-02T00:00:00Z' - 3. Bucket Name: 'example-bucket-3', Creation Date: - '2022-01-03T00:00:00Z' - Please note that the creation dates are in UTC. -``` diff --git a/docs/griptape-tools/official-tools/aws-s3-tool.md b/docs/griptape-tools/official-tools/aws-s3-tool.md new file mode 100644 index 000000000..c6a972d76 --- /dev/null +++ b/docs/griptape-tools/official-tools/aws-s3-tool.md @@ -0,0 +1,41 @@ +# Aws S3 Tool + +This tool enables LLMs to make AWS S3 API requests. 
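The referenced snippet file is not included in this diff. A minimal sketch, assuming the renamed `AwsS3Tool` keeps the same `session` parameter as the `AwsS3Client` example it replaces:

```python
import boto3

from griptape.structures import Agent
from griptape.tools import AwsS3Tool

# boto3 resolves credentials from the usual AWS environment/config.
agent = Agent(tools=[AwsS3Tool(session=boto3.Session(), off_prompt=False)])

agent.run("List all my S3 buckets.")
```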
+ +```python +--8<-- "docs/griptape-tools/official-tools/src/aws_s3_tool_1.py" +``` +``` +[08/12/24 14:51:36] INFO ToolkitTask bfc329ebc7d34497b429ab0d18ff7e7b + Input: List all my S3 buckets. +[08/12/24 14:51:37] INFO Subtask dfd07f9e204c4a3d8f55ca3eb9d37ec5 + Actions: [ + { + "tag": "call_pZQ05Zmm6lSbEcvPWt4XEDj6", + "name": "AwsS3Client", + "path": "list_s3_buckets", + "input": { + "values": {} + } + } + ] + INFO Subtask dfd07f9e204c4a3d8f55ca3eb9d37ec5 + Response: {'Name': 'dummy-bucket-1', 'CreationDate': datetime.datetime(2023, 9, 14, 15, 41, 46, + tzinfo=tzutc())} + + {'Name': 'dummy-bucket-2', 'CreationDate': datetime.datetime(2023, 9, 14, 15, 40, 33, tzinfo=tzutc())} + + {'Name': 'dummy-bucket-3', 'CreationDate': datetime.datetime(2023, 6, 23, 20, 19, 53, tzinfo=tzutc())} + + {'Name': 'dummy-bucket-4', 'CreationDate': datetime.datetime(2023, 8, 19, 17, 17, 13, tzinfo=tzutc())} + + {'Name': 'dummy-bucket-5', 'CreationDate': datetime.datetime(2024, 2, 15, 23, 17, 21, tzinfo=tzutc())} +[08/12/24 14:51:43] INFO ToolkitTask bfc329ebc7d34497b429ab0d18ff7e7b + Output: Here are all your S3 buckets: + + 1. dummy-bucket-1 (Created on 2023-09-14) + 2. dummy-bucket-2 (Created on 2023-09-14) + 3. dummy-bucket-3 (Created on 2023-06-23) + 4. dummy-bucket-4 (Created on 2023-08-19) + 5. dummy-bucket-5 (Created on 2024-02-15) +``` diff --git a/docs/griptape-tools/official-tools/calculator.md b/docs/griptape-tools/official-tools/calculator-tool.md similarity index 71% rename from docs/griptape-tools/official-tools/calculator.md rename to docs/griptape-tools/official-tools/calculator-tool.md index 648eee7f3..afe17a364 100644 --- a/docs/griptape-tools/official-tools/calculator.md +++ b/docs/griptape-tools/official-tools/calculator-tool.md @@ -1,18 +1,9 @@ -# Calculator +# Calculator Tool This tool enables LLMs to make simple calculations. ```python -from griptape.structures import Agent -from griptape.tools import Calculator - -# Create an agent with the Calculator tool -agent = Agent( - tools=[Calculator()] -) - -# Run the agent with a task to perform the arithmetic calculation of \(10^5\) -agent.run("What is 10 raised to the power of 5?") +--8<-- "docs/griptape-tools/official-tools/src/calculator_tool_1.py" ``` ``` [09/08/23 14:23:51] INFO Task bbc6002a5e5b4655bb52b6a550a1b2a5 @@ -21,9 +12,9 @@ agent.run("What is 10 raised to the power of 5?") Thought: The question is asking for the result of 10 raised to the power of 5. This is a mathematical operation that can be performed using the - Calculator tool. + CalculatorTool tool. - Action: {"name": "Calculator", + Action: {"name": "CalculatorTool", "path": "calculate", "input": {"values": {"expression": "10**5"}}} INFO Subtask 3e9211a0f44c4277812ae410c43adbc9 diff --git a/docs/griptape-tools/official-tools/computer-tool.md b/docs/griptape-tools/official-tools/computer-tool.md new file mode 100644 index 000000000..f21d4bda9 --- /dev/null +++ b/docs/griptape-tools/official-tools/computer-tool.md @@ -0,0 +1,47 @@ +# Computer Tool + +This tool enables LLMs to execute Python code and run shell commands inside a Docker container. You have to have the Docker daemon running in order for this tool to work. 
+ +You can specify a local working directory and environment variables during tool initialization: + +```python +--8<-- "docs/griptape-tools/official-tools/src/computer_tool_1.py" +``` +``` +❮ poetry run python src/docs/task-memory.py +[08/12/24 15:13:56] INFO ToolkitTask 203ee958d1934811afe0bb86fb246e86 + Input: Make 2 files and then list the files in the current directory +[08/12/24 15:13:58] INFO Subtask eb4e843b6f37498f9f0e85ada68114ac + Actions: [ + { + "tag": "call_S17vPQsMCqWY1Lt5x8NtDnTK", + "name": "Computer", + "path": "execute_command", + "input": { + "values": { + "command": "touch file1.txt file2.txt" + } + } + } + ] + INFO Subtask eb4e843b6f37498f9f0e85ada68114ac + Response: Tool returned an empty value +[08/12/24 15:13:59] INFO Subtask 032770e7697d44f6a0c8559bfea60420 + Actions: [ + { + "tag": "call_n61SVDYUGWTt681BaDSaHgt1", + "name": "Computer", + "path": "execute_command", + "input": { + "values": { + "command": "ls" + } + } + } + ] + INFO Subtask 032770e7697d44f6a0c8559bfea60420 + Response: file1.txt + file2.txt +[08/12/24 15:14:00] INFO ToolkitTask 203ee958d1934811afe0bb86fb246e86 + Output: file1.txt, file2.txt +``` diff --git a/docs/griptape-tools/official-tools/computer.md b/docs/griptape-tools/official-tools/computer.md deleted file mode 100644 index 6496e6fff..000000000 --- a/docs/griptape-tools/official-tools/computer.md +++ /dev/null @@ -1,107 +0,0 @@ -# Computer - -This tool enables LLMs to execute Python code and run shell commands inside a Docker container. You have to have the Docker daemon running in order for this tool to work. - -You can specify a local working directory and environment variables during tool initialization: - -```python title="PYTEST_IGNORE" -from griptape.structures import Agent -from griptape.tools import Computer - -# Initialize the Computer tool -computer = Computer() - -# Create an agent with the Computer tool -agent = Agent( - tools=[computer] -) - -# Create a file using the shell command -filename = "my_new_file.txt" -agent.run(f"Run this shell command for me: touch {filename}") - -# Add content to the file using the shell command -content = "This is the content of the file." -agent.run(f"Run this shell command for me: echo '{content}' > {filename}") - -# Output the contents of the file using the shell command -agent.run(f"Run this shell command for me: cat {filename}") -``` -``` -[09/11/23 16:24:15] INFO Task d08009ee983c4286ba10f83bcf3080e6 - Input: Run this shell command for me: touch - my_new_file.txt -[09/11/23 16:24:21] INFO Subtask 1ec0f9ea528e44b89eb9d41da0e00856 - Thought: The user wants to create a new file named - "my_new_file.txt". I can do this by executing the - shell command "touch my_new_file.txt" using the - Computer tool with the execute_command activity. - - Action: {"name": "Computer", - "path": "execute_command", "input": {"values": - {"command": "touch my_new_file.txt"}}} -[09/11/23 16:24:22] INFO Subtask 1ec0f9ea528e44b89eb9d41da0e00856 - Response: Output of "Computer.execute_command" - was stored in memory with memory_name - "TaskMemory" and artifact_namespace - "54ffbc84a37a497480cab6ab4f904e7e" -[09/11/23 16:24:28] INFO Subtask f3e1b1d09b4c46babda27342680aa770 - Thought: The command has been executed and the - output is stored in memory. However, the "touch" - command does not produce any output when it - successfully creates a file. To confirm that the - file was created, I can list the files in the - current directory using the "ls" command. 
- - Action: {"name": "Computer", - "path": "execute_command", "input": {"values": - {"command": "ls"}}} -[09/11/23 16:24:29] INFO Subtask f3e1b1d09b4c46babda27342680aa770 - Response: Output of "Computer.execute_command" - was stored in memory with memory_name - "TaskMemory" and artifact_namespace - "82bc4937564e4901b7fc51fced45b643" -[09/11/23 16:24:40] INFO Subtask 36f0bbfdd7974e6cb52766ba21dc64e0 - Thought: The output of the "ls" command is stored - in memory. I need to retrieve this output to check - if "my_new_file.txt" is listed, which would confirm - that the file was created successfully. - Action: {"name": "TaskMemoryClient", "path": - "query", "input": {"values": {"memory_name": - "TaskMemory", "artifact_namespace": - "82bc4937564e4901b7fc51fced45b643", "query": "Is - my_new_file.txt in the list of files?"}}} -[09/11/23 16:24:41] INFO Subtask 36f0bbfdd7974e6cb52766ba21dc64e0 - Response: Yes. -[09/11/23 16:24:42] INFO Task d08009ee983c4286ba10f83bcf3080e6 - Output: The file "my_new_file.txt" has been - successfully created. - INFO Task d08009ee983c4286ba10f83bcf3080e6 - Input: Run this shell command for me: echo 'This is - the content of the file.' > my_new_file.txt -[09/11/23 16:24:53] INFO Subtask a0a3fb162d6d4f3398a98c6d3604a491 - Thought: The user wants to write the text 'This is - the content of the file.' into the file - 'my_new_file.txt'. I can achieve this by using the - 'execute_command' activity of the 'Computer' tool. - - Action: {"name": "Computer", - "path": "execute_command", "input": {"values": - {"command": "echo 'This is the content of the - file.' > my_new_file.txt"}}} - INFO Subtask a0a3fb162d6d4f3398a98c6d3604a491 - Response: Output of "Computer.execute_command" - was stored in memory with memory_name - "TaskMemory" and artifact_namespace - "ec20f2e7ec674e0286c8d1f05d528957" -[09/11/23 16:25:00] INFO Task d08009ee983c4286ba10f83bcf3080e6 - Output: The text 'This is the content of the file.' - has been successfully written into - 'my_new_file.txt'. - INFO Task d08009ee983c4286ba10f83bcf3080e6 - Input: Run this shell command for me: cat - my_new_file.txt -[09/11/23 16:25:10] INFO Task d08009ee983c4286ba10f83bcf3080e6 - Output: The content of the file 'my_new_file.txt' - is: 'This is the content of the file.' -``` diff --git a/docs/griptape-tools/official-tools/date-time.md b/docs/griptape-tools/official-tools/date-time-tool.md similarity index 83% rename from docs/griptape-tools/official-tools/date-time.md rename to docs/griptape-tools/official-tools/date-time-tool.md index aa0c0cc55..bdc5ccbf4 100644 --- a/docs/griptape-tools/official-tools/date-time.md +++ b/docs/griptape-tools/official-tools/date-time-tool.md @@ -1,18 +1,9 @@ -# DateTime +# Date Time Tool This tool enables LLMs to get current date and time. ```python -from griptape.structures import Agent -from griptape.tools import DateTime - -# Create an agent with the DateTime tool -agent = Agent( - tools=[DateTime()] -) - -# Fetch the current date and time -agent.run("What is the current date and time?") +--8<-- "docs/griptape-tools/official-tools/src/date_time_tool_1.py" ``` ``` [09/11/23 15:26:02] INFO Task d0bf49dacd8849e695494578a333f6cc @@ -31,4 +22,4 @@ agent.run("What is the current date and time?") [09/11/23 15:26:08] INFO Task d0bf49dacd8849e695494578a333f6cc Output: The current date and time is September 11, 2023, 15:26:06. 
-```
\ No newline at end of file
+```
diff --git a/docs/griptape-tools/official-tools/email-client.md b/docs/griptape-tools/official-tools/email-client.md
deleted file mode 100644
index ec8af7bb6..000000000
--- a/docs/griptape-tools/official-tools/email-client.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# EmailClient
-
-The [EmailClient](../../reference/griptape/tools/email_client/tool.md) enables LLMs to send emails.
-
-```python
-import os
-from griptape.tools import EmailClient
-
-email_client = EmailClient(
-    smtp_host=os.environ.get("SMTP_HOST"),
-    smtp_port=int(os.environ.get("SMTP_PORT", 465)),
-    smtp_password=os.environ.get("SMTP_PASSWORD"),
-    smtp_user=os.environ.get("FROM_EMAIL"),
-    smtp_use_ssl=bool(os.environ.get("SMTP_USE_SSL")),
-)
-```
-
-For debugging purposes, you can run a local SMTP server that the LLM can send emails to:
-
-```shell
-python -m smtpd -c DebuggingServer -n localhost:1025
-```
diff --git a/docs/griptape-tools/official-tools/email-tool.md b/docs/griptape-tools/official-tools/email-tool.md
new file mode 100644
index 000000000..91ba6f19b
--- /dev/null
+++ b/docs/griptape-tools/official-tools/email-tool.md
@@ -0,0 +1,13 @@
+# Email Tool
+
+The [EmailTool](../../reference/griptape/tools/email/tool.md) enables LLMs to send emails.
+
+```python
+--8<-- "docs/griptape-tools/official-tools/src/email_tool_1.py"
+```
+
+For debugging purposes, you can run a local SMTP server that the LLM can send emails to:
+
+```shell
+python -m smtpd -c DebuggingServer -n localhost:1025
+```
diff --git a/docs/griptape-tools/official-tools/extraction-tool.md b/docs/griptape-tools/official-tools/extraction-tool.md
new file mode 100644
index 000000000..5b0486ffd
--- /dev/null
+++ b/docs/griptape-tools/official-tools/extraction-tool.md
@@ -0,0 +1,53 @@
+The [ExtractionTool](../../reference/griptape/tools/extraction/tool.md) enables LLMs to extract structured text from unstructured data. 
+ +```python +--8<-- "docs/griptape-tools/official-tools/src/extraction_tool_1.py" +``` +``` +[08/12/24 15:58:03] INFO ToolkitTask 43b3d209a83c470d8371b7ef4af175b4 + Input: Load https://griptape.ai and extract key info +[08/12/24 15:58:05] INFO Subtask 6a9a63802faf4717bab24bbbea2cb49b + Actions: [ + { + "tag": "call_SgrmWdXaYTQ1Cz9iB0iIZSYD", + "name": "WebScraperTool", + "path": "get_content", + "input": { + "values": { + "url": "https://griptape.ai" + } + } + } + ] +[08/12/24 15:58:06] INFO Subtask 6a9a63802faf4717bab24bbbea2cb49b + Response: Output of "WebScraperTool.get_content" was stored in memory with memory_name "TaskMemory" and artifact_namespace + "bf1c865b82554c9e896cb514bb86844c" +[08/12/24 15:58:07] INFO Subtask c06388d6079541d5aaff25c30e322c51 + Actions: [ + { + "tag": "call_o3MrpM01OnhCfpxsMe85tpDF", + "name": "ExtractionTool", + "path": "extract_json", + "input": { + "values": { + "data": { + "memory_name": "TaskMemory", + "artifact_namespace": "bf1c865b82554c9e896cb514bb86844c" + } + } + } + } + ] +[08/12/24 15:58:11] INFO Subtask c06388d6079541d5aaff25c30e322c51 + Response: {"company_name": "Griptape", "industry": "AI Applications", "product_features": ["Turn any developer into an AI developer.", "Build + your business logic using predictable, programmable python.", "Off-Prompt\u2122 for better security, performance, and lower costs.", "Deploy and + run the ETL, RAG, and structures you developed.", "Simple API abstractions.", "Skip the infrastructure management.", "Scale seamlessly with + workload requirements.", "Clean and clear abstractions for building Gen AI Agents, Systems of Agents, Pipelines, Workflows, and RAG + implementations.", "Build ETL pipelines to prep data for secure LLM access.", "Compose retrieval patterns for fast, accurate, detailed + information.", "Write agents, pipelines, and workflows to integrate business logic.", "Automated Data Prep (ETL): Connect any data source, + extract, prep/transform, and load into a vector database index.", "Retrieval as a Service (RAG): Generate answers, summaries, and details from + your own data with ready-made or custom retrieval patterns.", "Structure Runtime (RUN): Build AI agents, pipelines, and workflows for real-time + interfaces, transactional processes, and batch workloads."]} +[08/12/24 15:58:14] INFO ToolkitTask 43b3d209a83c470d8371b7ef4af175b4 + Output: Extracted key information from Griptape's website. +``` diff --git a/docs/griptape-tools/official-tools/file-manager.md b/docs/griptape-tools/official-tools/file-manager-tool.md similarity index 70% rename from docs/griptape-tools/official-tools/file-manager.md rename to docs/griptape-tools/official-tools/file-manager-tool.md index 491711ba0..2c27c86ea 100644 --- a/docs/griptape-tools/official-tools/file-manager.md +++ b/docs/griptape-tools/official-tools/file-manager-tool.md @@ -1,39 +1,20 @@ -# FileManager +# File Manager Tool This tool enables LLMs to save and load files. 
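+
+The tool also covers the save direction; a minimal sketch using the same `Agent` and `FileManagerTool` APIs as the snippet below (the prompt wording and file name are illustrative only):
+
+```python
+from griptape.structures import Agent
+from griptape.tools import FileManagerTool
+
+# The default FileManagerTool operates relative to the current working directory.
+agent = Agent(tools=[FileManagerTool()])
+
+# Ask the LLM to persist some text to disk through the tool.
+agent.run("Save the text 'Hello from Griptape' to a file named greeting.txt")
+```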
```python -from griptape.structures import Agent -from griptape.tools import FileManager - -# Initialize the FileManager tool with the current directory as its base -file_manager_tool = FileManager() - -# Add the tool to the Agent -agent = Agent( - tools=[file_manager_tool] -) - -# Directly create a file named 'sample1.txt' with some content -filename = "sample1.txt" -content = "This is the content of sample1.txt" - -with open(filename, "w") as f: - f.write(content) - -# Now, read content from the file 'sample1.txt' using the agent's command -agent.run("Can you get me the sample1.txt file?") +--8<-- "docs/griptape-tools/official-tools/src/file_manager_tool_1.py" ``` ``` [09/12/23 12:07:56] INFO Task 16a1ce1847284ae3805485bad7d99116 Input: Can you get me the sample1.txt file? [09/12/23 12:08:04] INFO Subtask ddcf48d970ce4edbbc22a46b2f83ec4f Thought: The user wants the content of the file - named "sample1.txt". I can use the FileManager tool + named "sample1.txt". I can use the FileManagerTool tool with the activity "load_files_from_disk" to load the file from the disk. - Action: {"name": "FileManager", + Action: {"name": "FileManagerTool", "path": "load_files_from_disk", "input": {"values": {"paths": ["sample1.txt"]}}} INFO Subtask ddcf48d970ce4edbbc22a46b2f83ec4f diff --git a/docs/griptape-tools/official-tools/google-calendar-tool.md b/docs/griptape-tools/official-tools/google-calendar-tool.md new file mode 100644 index 000000000..e0b5d9cdc --- /dev/null +++ b/docs/griptape-tools/official-tools/google-calendar-tool.md @@ -0,0 +1,8 @@ +# Google Calendar Tool + +The [GoogleCalendarTool](../../reference/griptape/tools/google_calendar/tool.md) tool allows you to interact with Google Calendar. + + +```python +--8<-- "docs/griptape-tools/official-tools/src/google_calendar_tool_1.py" +``` diff --git a/docs/griptape-tools/official-tools/google-docs-client.md b/docs/griptape-tools/official-tools/google-docs-tool.md similarity index 54% rename from docs/griptape-tools/official-tools/google-docs-client.md rename to docs/griptape-tools/official-tools/google-docs-tool.md index db2f3c75b..1f02196b9 100644 --- a/docs/griptape-tools/official-tools/google-docs-client.md +++ b/docs/griptape-tools/official-tools/google-docs-tool.md @@ -1,38 +1,9 @@ -# GoogleDocsClient +# Google Docs Tool -The GoogleDocsClient tool provides a way to interact with the Google Docs API. It can be used to create new documents, save content to existing documents, and more. +The [GoogleDocsTool](../../reference/griptape/tools/google_docs/tool.md) tool provides a way to interact with the Google Docs API. It can be used to create new documents, save content to existing documents, and more. 
```python -import os -from griptape.structures import Agent -from griptape.tools import GoogleDocsClient - -# Create the GoogleDocsClient tool -google_docs_tool = GoogleDocsClient( - service_account_credentials={ - "type": os.environ["GOOGLE_ACCOUNT_TYPE"], - "project_id": os.environ["GOOGLE_PROJECT_ID"], - "private_key_id": os.environ["GOOGLE_PRIVATE_KEY_ID"], - "private_key": os.environ["GOOGLE_PRIVATE_KEY"], - "client_email": os.environ["GOOGLE_CLIENT_EMAIL"], - "client_id": os.environ["GOOGLE_CLIENT_ID"], - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://oauth2.googleapis.com/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": os.environ["GOOGLE_CERT_URL"] - }, - owner_email=os.environ["GOOGLE_OWNER_EMAIL"], -) - -# Set up an agent using the GoogleDocsClient tool -agent = Agent( - tools=[google_docs_tool] -) - -# Task: Create a new Google Doc and save content to it -agent.run( - "Create doc with name 'test_creation' in test folder with content 'Hey, Tony.", -) +--8<-- "docs/griptape-tools/official-tools/src/google_docs_tool_1.py" ``` ``` [10/05/23 12:56:19] INFO ToolkitTask 90721b7478a74618a63d852d35be3b18 @@ -43,10 +14,10 @@ agent.run( named 'test_creation' in a folder named 'test' with the content 'Hey, Tony.'. I can use the 'save_content_to_google_doc' activity of the - GoogleDocsClient tool to achieve this. + GoogleDocsTool tool to achieve this. Action: {"name": - "GoogleDocsClient", "path": + "GoogleDocsTool", "path": "save_content_to_google_doc", "input": {"values": {"file_path": "test_creation", "content": "Hey, Tony.", "folder_path": "test"}}} diff --git a/docs/griptape-tools/official-tools/google-drive-client.md b/docs/griptape-tools/official-tools/google-drive-tool.md similarity index 50% rename from docs/griptape-tools/official-tools/google-drive-client.md rename to docs/griptape-tools/official-tools/google-drive-tool.md index 069b9a6f9..18e10ec08 100644 --- a/docs/griptape-tools/official-tools/google-drive-client.md +++ b/docs/griptape-tools/official-tools/google-drive-tool.md @@ -1,38 +1,9 @@ -# GoogleDriveClient +# Google Drive Tool -The GoogleDriveClient tool provides a way to interact with the Google Drive API. It can be used to save content on Drive, list files, and more. +The [GoogleDriveTool](../../reference/griptape/tools/google_drive/tool.md) tool provides a way to interact with the Google Drive API. It can be used to save content on Drive, list files, and more. 
```python -import os -from griptape.structures import Agent -from griptape.tools import GoogleDriveClient - -# Create the GoogleDriveClient tool -google_drive_tool = GoogleDriveClient( - service_account_credentials={ - "type": os.environ["GOOGLE_ACCOUNT_TYPE"], - "project_id": os.environ["GOOGLE_PROJECT_ID"], - "private_key_id": os.environ["GOOGLE_PRIVATE_KEY_ID"], - "private_key": os.environ["GOOGLE_PRIVATE_KEY"], - "client_email": os.environ["GOOGLE_CLIENT_EMAIL"], - "client_id": os.environ["GOOGLE_CLIENT_ID"], - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://oauth2.googleapis.com/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": os.environ["GOOGLE_CERT_URL"] - }, - owner_email=os.environ["GOOGLE_OWNER_EMAIL"], -) - -# Set up an agent using the GoogleDriveClient tool -agent = Agent( - tools=[google_drive_tool] -) - -# Task: Save content to my Google Drive (default directory is root) -agent.run( - "Save the content 'Hi this is Tony' in a filed named 'hello.txt' to my Drive.", -) +--8<-- "docs/griptape-tools/official-tools/src/google_drive_tool_1.py" ``` ``` [10/05/23 10:49:14] INFO ToolkitTask 2ae3bb7e828744f3a2631c29c6fce001 @@ -42,11 +13,11 @@ agent.run( Thought: The user wants to save the content 'Hi this is Tony' in a file named 'hello.txt' to Google Drive. I can use the 'save_content_to_drive' - activity of the GoogleDriveClient tool to + activity of the GoogleDriveTool tool to accomplish this. Action: {"name": - "GoogleDriveClient", "path": + "GoogleDriveTool", "path": "save_content_to_drive", "input": {"values": {"path": "hello.txt", "content": "Hi this is Tony"}}} diff --git a/docs/griptape-tools/official-tools/google-gmail-client.md b/docs/griptape-tools/official-tools/google-gmail-tool.md similarity index 52% rename from docs/griptape-tools/official-tools/google-gmail-client.md rename to docs/griptape-tools/official-tools/google-gmail-tool.md index d012939a2..1a9e6ea47 100644 --- a/docs/griptape-tools/official-tools/google-gmail-client.md +++ b/docs/griptape-tools/official-tools/google-gmail-tool.md @@ -1,39 +1,9 @@ -# GoogleGmailClient +# Google Gmail Tool -The GoogleGmailClient tool provides a way to interact with the Gmail API. It can be used to create draft emails, send emails, and more. +The [GoogleGmailTool](../../reference/griptape/tools/google_gmail/tool.md) tool provides a way to interact with the Gmail API. It can be used to create draft emails, send emails, and more. 
```python -from griptape.tools import GoogleGmailClient -from griptape.structures import Agent -import os - -# Create the GoogleGmailClient tool -gmail_tool = GoogleGmailClient( - service_account_credentials={ - "type": os.environ["GOOGLE_ACCOUNT_TYPE"], - "project_id": os.environ["GOOGLE_PROJECT_ID"], - "private_key_id": os.environ["GOOGLE_PRIVATE_KEY_ID"], - "private_key": os.environ["GOOGLE_PRIVATE_KEY"], - "client_email": os.environ["GOOGLE_CLIENT_EMAIL"], - "client_id": os.environ["GOOGLE_CLIENT_ID"], - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://oauth2.googleapis.com/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": os.environ["GOOGLE_CERT_URL"] - }, - owner_email=os.environ["GOOGLE_OWNER_EMAIL"], -) - -# Set up an agent using the GoogleGmailClient tool -agent = Agent( - tools=[gmail_tool] -) - -# Task: Create a draft email in GMail -agent.run( - "Create a draft email in Gmail to example@email.com with the subject 'Test Draft', the body " - "'This is a test draft email.'", -) +--8<-- "docs/griptape-tools/official-tools/src/google_gmail_tool_1.py" ``` ``` [10/05/23 13:24:05] INFO ToolkitTask 1f190f823d584053bfe9942f41b6cb2d @@ -42,13 +12,13 @@ agent.run( the body 'This is a test draft email.' [10/05/23 13:24:15] INFO Subtask 7f2cce7e5b0e425ba696531561697b96 Thought: The user wants to create a draft email in - Gmail. I can use the GoogleGmailClient tool with + Gmail. I can use the GoogleGmailTool tool with the create_draft_email activity to accomplish this. I will need to provide the 'to', 'subject', and 'body' values as input. Action: {"name": - "GoogleGmailClient", "path": + "GoogleGmailTool", "path": "create_draft_email", "input": {"values": {"to": "example@email.com", "subject": "Test Draft", "body": "This is a test draft email."}}} diff --git a/docs/griptape-tools/official-tools/griptape-cloud-knowledge-base-client.md b/docs/griptape-tools/official-tools/griptape-cloud-knowledge-base-client.md deleted file mode 100644 index cd6e33ac9..000000000 --- a/docs/griptape-tools/official-tools/griptape-cloud-knowledge-base-client.md +++ /dev/null @@ -1,25 +0,0 @@ -## Overview - -The `GriptapeCloudKnowledgeBaseClient` is a lightweight Tool to retrieve data from a RAG pipeline and vector store hosted in [Griptape Cloud](https://cloud.griptape.ai). It enables searching across a centralized [Knowledge Base](https://cloud.griptape.ai/knowledge-bases) that can consist of various data sources such as Confluence, Google Docs, and web pages. - -**Note:** This tool requires a [Knowledge Base](https://cloud.griptape.ai/knowledge-bases) hosted in Griptape Cloud and an [API Key](https://cloud.griptape.ai/keys) for access. 
- -```python -import os -from griptape.structures import Agent -from griptape.tools import GriptapeCloudKnowledgeBaseClient - -knowledge_base_client = GriptapeCloudKnowledgeBaseClient( - description="Contains information about the company and its operations", - api_key=os.environ["GRIPTAPE_CLOUD_API_KEY"], - knowledge_base_id=os.environ["GRIPTAPE_CLOUD_KB_ID"], -) - -agent = Agent( - tools=[ - knowledge_base_client, - ] -) - -agent.run("What is the company's corporate travel policy?") -``` diff --git a/docs/griptape-tools/official-tools/griptape-cloud-knowledge-base-tool.md b/docs/griptape-tools/official-tools/griptape-cloud-knowledge-base-tool.md new file mode 100644 index 000000000..96af51782 --- /dev/null +++ b/docs/griptape-tools/official-tools/griptape-cloud-knowledge-base-tool.md @@ -0,0 +1,9 @@ +# Griptape Cloud Knowledge Base Tool + +The [GriptapeCloudKnowledgeBaseTool](../../reference/griptape/tools/griptape_cloud_knowledge_base/tool.md) is a lightweight Tool to retrieve data from a RAG pipeline and vector store hosted in [Griptape Cloud](https://cloud.griptape.ai). It enables searching across a centralized [Knowledge Base](https://cloud.griptape.ai/knowledge-bases) that can consist of various data sources such as Confluence, Google Docs, and web pages. + +**Note:** This tool requires a [Knowledge Base](https://cloud.griptape.ai/knowledge-bases) hosted in Griptape Cloud and an [API Key](https://cloud.griptape.ai/account/api-keys) for access. + +```python +--8<-- "docs/griptape-tools/official-tools/src/griptape_cloud_knowledge_base_tool_1.py" +``` diff --git a/docs/griptape-tools/official-tools/image-query-tool.md b/docs/griptape-tools/official-tools/image-query-tool.md new file mode 100644 index 000000000..781a279a8 --- /dev/null +++ b/docs/griptape-tools/official-tools/image-query-tool.md @@ -0,0 +1,7 @@ +# Image Query Tool + +This tool allows Agents to execute natural language queries on the contents of images using multimodal models. + +```python +--8<-- "docs/griptape-tools/official-tools/src/image_query_tool_1.py" +``` diff --git a/docs/griptape-tools/official-tools/inpainting-image-generation-client.md b/docs/griptape-tools/official-tools/inpainting-image-generation-client.md deleted file mode 100644 index 82c99adb7..000000000 --- a/docs/griptape-tools/official-tools/inpainting-image-generation-client.md +++ /dev/null @@ -1,32 +0,0 @@ -# InpaintingImageGenerationClient - -This tool allows LLMs to generate images using inpainting, where an input image is altered within the area specified by a mask image according to a prompt. The input and mask images can be provided either by their file path or by their [Task Memory](../../griptape-framework/structures/task-memory.md) references. - -```python -from griptape.structures import Agent -from griptape.engines import InpaintingImageGenerationEngine -from griptape.drivers import AmazonBedrockImageGenerationDriver, \ - BedrockStableDiffusionImageGenerationModelDriver -from griptape.tools import InpaintingImageGenerationClient - - -# Create a driver configured to use Stable Diffusion via Bedrock. -driver = AmazonBedrockImageGenerationDriver( - image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), - model="stability.stable-diffusion-xl-v0", -) - -# Create an engine configured to use the driver. -engine = InpaintingImageGenerationEngine( - image_generation_driver=driver, -) - -# Create a tool configured to use the engine. 
-tool = InpaintingImageGenerationClient( - engine=engine, -) - -# Create an agent and provide the tool to it. -Agent(tools=[tool]).run("Generate an image of a castle built into the side of a mountain by inpainting the " - "image at tests/resources/mountain.png using the mask at tests/resources/mountain-mask.png.") -``` diff --git a/docs/griptape-tools/official-tools/inpainting-image-generation-tool.md b/docs/griptape-tools/official-tools/inpainting-image-generation-tool.md new file mode 100644 index 000000000..7abdc6238 --- /dev/null +++ b/docs/griptape-tools/official-tools/inpainting-image-generation-tool.md @@ -0,0 +1,7 @@ +# Inpainting Image Generation Tool + +This tool allows LLMs to generate images using inpainting, where an input image is altered within the area specified by a mask image according to a prompt. The input and mask images can be provided either by their file path or by their [Task Memory](../../griptape-framework/structures/task-memory.md) references. + +```python +--8<-- "docs/griptape-tools/official-tools/src/inpainting_image_generation_tool_1.py" +``` diff --git a/docs/griptape-tools/official-tools/openweather-client.md b/docs/griptape-tools/official-tools/openweather-client.md deleted file mode 100644 index 65d0975d4..000000000 --- a/docs/griptape-tools/official-tools/openweather-client.md +++ /dev/null @@ -1,19 +0,0 @@ -# OpenWeatherClient - -The [OpenWeatherClient](../../reference/griptape/tools/openweather_client/tool.md) enables LLMs to use [OpenWeatherMap](https://openweathermap.org/). - -```python -import os -from griptape.structures import Agent -from griptape.tools import OpenWeatherClient - -agent = Agent( - tools=[ - OpenWeatherClient( - api_key=os.environ["OPENWEATHER_API_KEY"], - ), - ] -) - -agent.run("What's the weather currently like in San Francisco?") -``` diff --git a/docs/griptape-tools/official-tools/openweather-tool.md b/docs/griptape-tools/official-tools/openweather-tool.md new file mode 100644 index 000000000..be1ed3972 --- /dev/null +++ b/docs/griptape-tools/official-tools/openweather-tool.md @@ -0,0 +1,7 @@ +# Open Weather Tool + +The [OpenWeatherTool](../../reference/griptape/tools/openweather/tool.md) enables LLMs to use [OpenWeatherMap](https://openweathermap.org/). + +```python +--8<-- "docs/griptape-tools/official-tools/src/openweather_tool_1.py" +``` diff --git a/docs/griptape-tools/official-tools/outpainting-image-generation-client.md b/docs/griptape-tools/official-tools/outpainting-image-generation-client.md deleted file mode 100644 index 8e8940332..000000000 --- a/docs/griptape-tools/official-tools/outpainting-image-generation-client.md +++ /dev/null @@ -1,32 +0,0 @@ -# OutpaintingImageGenerationClient - -This tool allows LLMs to generate images using outpainting, where an input image is altered outside of the area specified by a mask image according to a prompt. The input and mask images can be provided either by their file path or by their [Task Memory](../../griptape-framework/structures/task-memory.md) references. - -```python -from griptape.structures import Agent -from griptape.engines import OutpaintingImageGenerationEngine -from griptape.drivers import AmazonBedrockImageGenerationDriver, \ - BedrockStableDiffusionImageGenerationModelDriver -from griptape.tools import OutpaintingImageGenerationClient - - -# Create a driver configured to use Stable Diffusion via Bedrock. 
-driver = AmazonBedrockImageGenerationDriver( - image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), - model="stability.stable-diffusion-xl-v0", -) - -# Create an engine configured to use the driver. -engine = OutpaintingImageGenerationEngine( - image_generation_driver=driver, -) - -# Create a tool configured to use the engine. -tool = OutpaintingImageGenerationClient( - engine=engine, -) - -# Create an agent and provide the tool to it. -Agent(tools=[tool]).run("Generate an image of a mountain shrouded by clouds by outpainting the " - "image at tests/resources/mountain.png using the mask at tests/resources/mountain-mask.png.") -``` diff --git a/docs/griptape-tools/official-tools/outpainting-image-generation-tool.md b/docs/griptape-tools/official-tools/outpainting-image-generation-tool.md new file mode 100644 index 000000000..ce97798bc --- /dev/null +++ b/docs/griptape-tools/official-tools/outpainting-image-generation-tool.md @@ -0,0 +1,7 @@ +# Outpainting Image Generation Tool + +This tool allows LLMs to generate images using outpainting, where an input image is altered outside of the area specified by a mask image according to a prompt. The input and mask images can be provided either by their file path or by their [Task Memory](../../griptape-framework/structures/task-memory.md) references. + +```python +--8<-- "docs/griptape-tools/official-tools/src/outpainting_image_generation_tool_1.py" +``` diff --git a/docs/griptape-tools/official-tools/prompt-image-generation-tool.md b/docs/griptape-tools/official-tools/prompt-image-generation-tool.md new file mode 100644 index 000000000..c764791dc --- /dev/null +++ b/docs/griptape-tools/official-tools/prompt-image-generation-tool.md @@ -0,0 +1,7 @@ +# Prompt Image Generation Tool + +This tool allows LLMs to generate images from a text prompt. + +```python +--8<-- "docs/griptape-tools/official-tools/src/prompt_image_generation_tool_1.py" +``` diff --git a/docs/griptape-tools/official-tools/prompt-summary-tool.md b/docs/griptape-tools/official-tools/prompt-summary-tool.md new file mode 100644 index 000000000..7afecf57b --- /dev/null +++ b/docs/griptape-tools/official-tools/prompt-summary-tool.md @@ -0,0 +1,105 @@ +The [PromptSummaryTool](../../reference/griptape/tools/prompt_summary/tool.md) enables LLMs summarize text data. 
+ +```python +--8<-- "docs/griptape-tools/official-tools/src/prompt_summary_tool_1.py" +``` +``` +[08/12/24 15:54:46] INFO ToolkitTask 8be73eb542c44418ba880399044c017a + Input: How can I build Neovim from source for MacOS according to this https://github.com/neovim/neovim/blob/master/BUILD.md +[08/12/24 15:54:47] INFO Subtask cd362a149e1d400997be93c1342d1663 + Actions: [ + { + "tag": "call_DGsOHC4AVxhV7RPVA7q3rATX", + "name": "WebScraperTool", + "path": "get_content", + "input": { + "values": { + "url": "https://github.com/neovim/neovim/blob/master/BUILD.md" + } + } + } + ] +[08/12/24 15:54:49] INFO Subtask cd362a149e1d400997be93c1342d1663 + Response: Output of "WebScraperTool.get_content" was stored in memory with memory_name "TaskMemory" and artifact_namespace + "990b689c57de4581b8715963c0aecfe3" +[08/12/24 15:54:50] INFO Subtask 919a4a9eb900439ab9bfbf6e921feba3 + Actions: [ + { + "tag": "call_DK3a4MYoElJbaCrUJekBReIc", + "name": "PromptSummaryTool", + "path": "summarize", + "input": { + "values": { + "summary": { + "memory_name": "TaskMemory", + "artifact_namespace": "990b689c57de4581b8715963c0aecfe3" + } + } + } + } + ] +[08/12/24 15:54:56] INFO Subtask 919a4a9eb900439ab9bfbf6e921feba3 + Response: The text provides detailed instructions for building and installing Neovim from source on various operating systems, including Linux, + BSD, Windows, and macOS. Key points include: + + 1. **Pre-requisites**: Ensure all build dependencies are installed. + 2. **Cloning and Building**: + - Clone the Neovim repository. + - Use `make` with appropriate `CMAKE_BUILD_TYPE` (Release, Debug, RelWithDebInfo). + - For stable releases, checkout the stable branch. + - Use `gmake` on BSD and follow specific instructions for Windows using MSVC. + 3. **Installation**: + - Default install location is `/usr/local`. + - On Debian/Ubuntu, build a DEB package for clean installation. + - Custom install locations can be set using `CMAKE_INSTALL_PREFIX`. + 4. **Dependencies**: + - Third-party dependencies are automatically downloaded. + - Use `Ninja` and `ccache` for faster builds. + 5. **Platform-Specific Instructions**: + - Detailed steps for building on Windows using Visual Studio. + - Instructions for building on macOS using Homebrew or MacPorts. + - Specific commands for building on NixOS, Cygwin, and MSYS2. + 6. **Debugging and Development**: + - Use `RelWithDebInfo` for optimized builds with debug info. + - Instructions for using debuggers and handling build cache. + 7. **Localization**: + - Commands for checking and updating translations. + + The text also includes troubleshooting tips and links to additional resources for further guidance. +[08/12/24 15:55:01] INFO ToolkitTask 8be73eb542c44418ba880399044c017a + Output: To build Neovim from source on macOS, follow these steps: + + 1. **Install Dependencies**: + Ensure you have the necessary build dependencies installed. You can use Homebrew to install them: + ```sh + brew install ninja libtool automake cmake pkg-config gettext curl + ``` + + 2. **Clone the Neovim Repository**: + Clone the Neovim repository from GitHub: + ```sh + git clone https://github.com/neovim/neovim.git + cd neovim + ``` + + 3. **Checkout the Stable Branch (Optional)**: + If you want to build the stable release, checkout the stable branch: + ```sh + git checkout stable + ``` + + 4. **Build Neovim**: + Use `make` to build Neovim. You can specify the build type (Release, Debug, RelWithDebInfo): + ```sh + make CMAKE_BUILD_TYPE=Release + ``` + + 5. 
**Install Neovim**: + After building, install Neovim. The default install location is `/usr/local`: + ```sh + sudo make install + ``` + + By following these steps, you should be able to build and install Neovim from source on macOS. For more detailed instructions and + troubleshooting tips, refer to the [BUILD.md](https://github.com/neovim/neovim/blob/master/BUILD.md) file in the Neovim repository. +``` diff --git a/docs/griptape-tools/official-tools/query-tool.md b/docs/griptape-tools/official-tools/query-tool.md new file mode 100644 index 000000000..4a4f2bf33 --- /dev/null +++ b/docs/griptape-tools/official-tools/query-tool.md @@ -0,0 +1,85 @@ +The [QueryTool](../../reference/griptape/tools/query/tool.md) enables Agents to query unstructured data for specific information. + +```python +--8<-- "docs/griptape-tools/official-tools/src/query_tool_1.py" +``` +``` +[08/12/24 15:49:23] INFO ToolkitTask a88abda2e5324bdf81a3e2b99c26b9df + Input: Tell me about the architecture as described here: https://neovim.io/doc/user/vim_diff.html +[08/12/24 15:49:24] INFO Subtask 3dc9910bcac44c718b3aedd6222e372a + Actions: [ + { + "tag": "call_VY4r5YRc2QDjtBvn89z5PH8E", + "name": "WebScraperTool", + "path": "get_content", + "input": { + "values": { + "url": "https://neovim.io/doc/user/vim_diff.html" + } + } + } + ] +[08/12/24 15:49:25] INFO Subtask 3dc9910bcac44c718b3aedd6222e372a + Response: Output of "WebScraperTool.get_content" was stored in memory with memory_name "TaskMemory" and artifact_namespace + "bec6deeac5f84e369c41210e67905415" +[08/12/24 15:49:26] INFO Subtask f41d2189ecff4458acb8e6dadb5b13aa + Actions: [ + { + "tag": "call_GtBICZi6oIeL85Aj7q5szul9", + "name": "QueryTool", + "path": "query", + "input": { + "values": { + "query": "architecture", + "content": { + "memory_name": "TaskMemory", + "artifact_namespace": "bec6deeac5f84e369c41210e67905415" + } + } + } + } + ] +[08/12/24 15:49:33] INFO Subtask f41d2189ecff4458acb8e6dadb5b13aa + Response: The architecture of Neovim (Nvim) is designed to improve stability, performance, and extensibility. Here are some key points about + Nvim's architecture: + + 1. **Decoupled UI**: The Nvim UI is decoupled from the core editor. All UIs, including the built-in TUI (terminal user interface), are plugins + that connect to a Nvim server. Multiple Nvim UI clients can connect to the same Nvim editor server. + + 2. **External Plugins**: External plugins run in separate processes, which improves stability and allows those plugins to work without blocking + the editor. Even "legacy" Python and Ruby plugins, which use the old Vim interfaces, run out-of-process, so they cannot crash Nvim. + + 3. **Libuv**: Platform and I/O facilities are built upon libuv. Nvim benefits from libuv features and bug fixes, and other projects benefit from + improvements to libuv by Nvim developers. + + 4. **Robust API**: Nvim has a robust API, which is used instead of exposing internal test functions like Vim's `test_autochdir()`, + `test_settime()`, etc. + + 5. **Feature Inclusion**: Nvim always includes all features, in contrast to Vim, which ships various combinations of 100+ optional features. + This reduces the surface area for bugs and removes a common source of confusion and friction for users. + + 6. **External Plugins and Extensions**: Nvim avoids features that cannot be provided on all platforms, delegating those to external + plugins/extensions. + + These architectural decisions make Nvim more stable, extensible, and user-friendly compared to traditional Vim. 
+[08/12/24 15:49:37] INFO ToolkitTask a88abda2e5324bdf81a3e2b99c26b9df + Output: The architecture of Neovim (Nvim) is designed to enhance stability, performance, and extensibility. Here are the key points: + + 1. **Decoupled UI**: The user interface (UI) is separated from the core editor. All UIs, including the built-in terminal user interface (TUI), + are plugins that connect to a Nvim server. This allows multiple UI clients to connect to the same Nvim editor server. + + 2. **External Plugins**: Plugins run in separate processes, which improves stability and prevents them from blocking the editor. Even older + Python and Ruby plugins run out-of-process, ensuring they cannot crash Nvim. + + 3. **Libuv**: Nvim's platform and I/O facilities are built on libuv, benefiting from its features and bug fixes. Improvements made by Nvim + developers to libuv also benefit other projects. + + 4. **Robust API**: Nvim provides a robust API, avoiding the need to expose internal test functions like Vim does. + + 5. **Feature Inclusion**: Unlike Vim, which ships with various combinations of optional features, Nvim includes all features by default. This + reduces bugs and user confusion. + + 6. **External Plugins and Extensions**: Nvim delegates features that cannot be provided on all platforms to external plugins/extensions. + + These architectural choices make Nvim more stable, extensible, and user-friendly compared to traditional Vim. +``` diff --git a/docs/griptape-tools/official-tools/rag-client.md b/docs/griptape-tools/official-tools/rag-client.md deleted file mode 100644 index 8b1447768..000000000 --- a/docs/griptape-tools/official-tools/rag-client.md +++ /dev/null @@ -1,76 +0,0 @@ -The [RagClient](../../reference/griptape/tools/rag_client/tool.md) enables LLMs to query modular RAG engines. - -Here is an example of how it can be used with a local vector store driver: - -```python -from griptape.artifacts import TextArtifact -from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver, OpenAiChatPromptDriver -from griptape.engines.rag import RagEngine -from griptape.engines.rag.modules import VectorStoreRetrievalRagModule, PromptResponseRagModule -from griptape.engines.rag.stages import RetrievalRagStage, ResponseRagStage -from griptape.structures import Agent -from griptape.tools import RagClient - - -vector_store_driver = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver()) - -artifact = TextArtifact( - "Griptape builds AI-powered applications that connect securely to your enterprise data and APIs." - "Griptape Agents provide incredible power and flexibility when working with large language models." -) - -vector_store_driver.upsert_text_artifact(artifact=artifact, namespace="griptape") - -rag_client = RagClient( - description="Contains information about Griptape", - off_prompt=False, - rag_engine=RagEngine( - retrieval_stage=RetrievalRagStage( - retrieval_modules=[ - VectorStoreRetrievalRagModule( - vector_store_driver=vector_store_driver, - query_params={ - "namespace": "griptape", - "top_n": 20 - } - ) - ] - ), - response_stage=ResponseRagStage( - response_module=PromptResponseRagModule( - prompt_driver=OpenAiChatPromptDriver(model="gpt-4o") - ) - ) - ) -) - -agent = Agent( - tools=[rag_client] -) - -agent.run("what is Griptape?") - -``` -``` -[07/11/24 13:30:43] INFO ToolkitTask a6d057d5c71d4e9cb6863a2adb64b76c - Input: what is Griptape? 
-[07/11/24 13:30:44] INFO Subtask 8fd89ed9eefe49b8892187f2fca3890a - Actions: [ - { - "tag": "call_4MaDzOuKnWAs2gmhK3KJhtjI", - "name": "RagClient", - "path": "search", - "input": { - "values": { - "query": "What is Griptape?" - } - } - } - ] -[07/11/24 13:30:49] INFO Subtask 8fd89ed9eefe49b8892187f2fca3890a - Response: Griptape builds AI-powered applications that connect securely to your enterprise data and APIs. Griptape Agents provide incredible - power and flexibility when working with large language models. - INFO ToolkitTask a6d057d5c71d4e9cb6863a2adb64b76c - Output: Griptape builds AI-powered applications that connect securely to your enterprise data and APIs. Griptape Agents provide incredible - power and flexibility when working with large language models. -``` diff --git a/docs/griptape-tools/official-tools/rag-tool.md b/docs/griptape-tools/official-tools/rag-tool.md new file mode 100644 index 000000000..71613beab --- /dev/null +++ b/docs/griptape-tools/official-tools/rag-tool.md @@ -0,0 +1,32 @@ +# Rag Tool + +The [RagTool](../../reference/griptape/tools/rag/tool.md) enables LLMs to query modular RAG engines. + +Here is an example of how it can be used with a local vector store driver: + +```python +--8<-- "docs/griptape-tools/official-tools/src/rag_tool_1.py" +``` +``` +[07/11/24 13:30:43] INFO ToolkitTask a6d057d5c71d4e9cb6863a2adb64b76c + Input: what is Griptape? +[07/11/24 13:30:44] INFO Subtask 8fd89ed9eefe49b8892187f2fca3890a + Actions: [ + { + "tag": "call_4MaDzOuKnWAs2gmhK3KJhtjI", + "name": "RagTool", + "path": "search", + "input": { + "values": { + "query": "What is Griptape?" + } + } + } + ] +[07/11/24 13:30:49] INFO Subtask 8fd89ed9eefe49b8892187f2fca3890a + Response: Griptape builds AI-powered applications that connect securely to your enterprise data and APIs. Griptape Agents provide incredible + power and flexibility when working with large language models. + INFO ToolkitTask a6d057d5c71d4e9cb6863a2adb64b76c + Output: Griptape builds AI-powered applications that connect securely to your enterprise data and APIs. Griptape Agents provide incredible + power and flexibility when working with large language models. +``` diff --git a/docs/griptape-tools/official-tools/rest-api-tool.md b/docs/griptape-tools/official-tools/rest-api-tool.md new file mode 100644 index 000000000..345f0589b --- /dev/null +++ b/docs/griptape-tools/official-tools/rest-api-tool.md @@ -0,0 +1,12 @@ +# Rest Api Tool + +This tool enables LLMs to call REST APIs. + +The [RestApiTool](../../reference/griptape/tools/rest_api/tool.md) tool uses the following parameters: + +### Example +The following example is built using [https://jsonplaceholder.typicode.com/guide/](https://jsonplaceholder.typicode.com/guide/). + +```python +--8<-- "docs/griptape-tools/official-tools/src/rest_api_tool_1.py" +``` diff --git a/docs/griptape-tools/official-tools/sql-client.md b/docs/griptape-tools/official-tools/sql-client.md deleted file mode 100644 index 7e5bf35e6..000000000 --- a/docs/griptape-tools/official-tools/sql-client.md +++ /dev/null @@ -1,71 +0,0 @@ -# SqlClient - -This tool enables LLMs to execute SQL statements via [SQLAlchemy](https://www.sqlalchemy.org/). Depending on your underlying SQL engine, [configure](https://docs.sqlalchemy.org/en/20/core/engines.html) your `engine_url` and give the LLM a hint about what engine you are using via `engine_name`, so that it can create engine-specific statements. 
- -```python -import os -import boto3 -from griptape.drivers import AmazonRedshiftSqlDriver -from griptape.loaders import SqlLoader -from griptape.structures import Agent -from griptape.tools import SqlClient - - -session = boto3.Session() - -sql_loader = SqlLoader( - sql_driver=AmazonRedshiftSqlDriver( - database=os.environ["REDSHIFT_DATABASE"], - session=session, - cluster_identifier=os.environ['REDSHIFT_CLUSTER_IDENTIFIER'], - ) -) - -sql_tool = SqlClient( - sql_loader=sql_loader, - table_name="people", - table_description="contains information about tech industry professionals", - engine_name="redshift" -) - -agent = Agent( - tools=[sql_tool] -) -agent.run("SELECT * FROM people;") -``` -``` -[09/11/23 17:02:55] INFO Task d8331f8705b64b4b9d9a88137ed73f3f - Input: SELECT * FROM people; -[09/11/23 17:03:02] INFO Subtask 46c2f8926ce9469e9ca6b1b3364e3e41 - Thought: The user wants to retrieve all records - from the 'people' table. I can use the SqlClient - tool to execute this query. - - Action: {"name": "SqlClient", - "path": "execute_query", "input": {"values": - {"sql_query": "SELECT * FROM people;"}}} -[09/11/23 17:03:03] INFO Subtask 46c2f8926ce9469e9ca6b1b3364e3e41 - Response: Output of "SqlClient.execute_query" - was stored in memory with memory_name - "TaskMemory" and artifact_namespace - "217715ba3e444e4985bee223df5716a8" -[09/11/23 17:03:11] INFO Subtask e51f05449647482caa3051378ab5cb8c - Thought: The output of the SQL query has been - stored in memory. I can retrieve this data using - the TaskMemory's 'summarize' activity. - Action: {"name": "TaskMemoryClient", "path": - "summarize", "input": {"values": {"memory_name": - "TaskMemory", "artifact_namespace": - "217715ba3e444e4985bee223df5716a8"}}} -[09/11/23 17:03:12] INFO Subtask e51f05449647482caa3051378ab5cb8c - Response: The text includes a list of employees - with their respective IDs, names, positions. There - are two employees named Tanya Cooley who are both - managers, and two employees named John Doe who are - both coders. -[09/11/23 17:03:17] INFO Task d8331f8705b64b4b9d9a88137ed73f3f - Output: The 'people' table contains records of - several employees. Notably, there are two employees - named Tanya Cooley who are both managers, and two - employees named John Doe who are both coders. -``` diff --git a/docs/griptape-tools/official-tools/sql-tool.md b/docs/griptape-tools/official-tools/sql-tool.md new file mode 100644 index 000000000..ed8aae2a0 --- /dev/null +++ b/docs/griptape-tools/official-tools/sql-tool.md @@ -0,0 +1,56 @@ +# Sql Tool + +This tool enables LLMs to execute SQL statements via [SQLAlchemy](https://www.sqlalchemy.org/). Depending on your underlying SQL engine, [configure](https://docs.sqlalchemy.org/en/20/core/engines.html) your `engine_url` and give the LLM a hint about what engine you are using via `engine_name`, so that it can create engine-specific statements. 
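+
+For example, a generic SQLAlchemy-backed setup might look roughly like this (a sketch only: it assumes `SqlTool` keeps the `sql_loader`, `table_name`, `table_description`, and `engine_name` arguments of the former `SqlClient`, and that `SqlDriver` accepts a standard SQLAlchemy `engine_url`):
+
+```python
+import os
+
+from griptape.drivers import SqlDriver
+from griptape.loaders import SqlLoader
+from griptape.structures import Agent
+from griptape.tools import SqlTool
+
+# Any engine_url that SQLAlchemy understands should work here, e.g. a Postgres DSN.
+sql_loader = SqlLoader(sql_driver=SqlDriver(engine_url=os.environ["SQL_ENGINE_URL"]))
+
+sql_tool = SqlTool(
+    sql_loader=sql_loader,
+    table_name="people",
+    table_description="contains information about tech industry professionals",
+    engine_name="postgres",  # hint so the LLM emits engine-specific SQL
+)
+
+agent = Agent(tools=[sql_tool])
+agent.run("SELECT * FROM people;")
+```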
+ +```python +--8<-- "docs/griptape-tools/official-tools/src/sql_tool_1.py" +``` +``` +[08/12/24 14:59:31] INFO ToolkitTask e302f7315d1a4f939e0125103ff4f09f + Input: SELECT * FROM people; +[08/12/24 14:59:34] INFO Subtask 809d1a281b85447f90706d431b77b845 + Actions: [ + { + "tag": "call_dCxHWwPwgmDvDKVd3QeOzyuT", + "name": "SqlClient", + "path": "execute_query", + "input": { + "values": { + "sql_query": "SELECT * FROM people" + } + } + } + ] +[08/12/24 14:59:35] INFO Subtask 809d1a281b85447f90706d431b77b845 + Response: 1,Lee,Andrews,"Engineer, electrical" + + 2,Michael,Woods,"Therapist, art" + + 3,Joshua,Allen,"Therapist, sports" + + 4,Eric,Foster,English as a second language teacher + + 5,John,Daniels,Printmaker + + 6,Matthew,Barton,Podiatrist + + 7,Audrey,Wilson,IT technical support officer + + 8,Leah,Knox,"Social research officer, government" + + 9,David,Macdonald,Public relations account executive + + 10,Erica,Ramos,"Accountant, chartered public finance" +[08/12/24 14:59:43] INFO ToolkitTask e302f7315d1a4f939e0125103ff4f09f + Output: + 1. Lee Andrews - Engineer, electrical + 2. Michael Woods - Therapist, art + 3. Joshua Allen - Therapist, sports + 4. Eric Foster - English as a second language teacher + 5. John Daniels - Printmaker + 6. Matthew Barton - Podiatrist + 7. Audrey Wilson - IT technical support officer + 8. Leah Knox - Social research officer, government + 9. David Macdonald - Public relations account executive + 10. Erica Ramos - Accountant, chartered public finance +``` diff --git a/docs/griptape-tools/official-tools/src/audio_transcription_tool_1.py b/docs/griptape-tools/official-tools/src/audio_transcription_tool_1.py new file mode 100644 index 000000000..bc25fd1fa --- /dev/null +++ b/docs/griptape-tools/official-tools/src/audio_transcription_tool_1.py @@ -0,0 +1,17 @@ +from griptape.drivers import OpenAiAudioTranscriptionDriver +from griptape.engines import AudioTranscriptionEngine +from griptape.structures import Agent +from griptape.tools.audio_transcription.tool import AudioTranscriptionTool + +driver = OpenAiAudioTranscriptionDriver(model="whisper-1") + +tool = AudioTranscriptionTool( + off_prompt=False, + engine=AudioTranscriptionEngine( + audio_transcription_driver=driver, + ), +) + +Agent(tools=[tool]).run( + "Transcribe the following audio file: /Users/andrew/code/griptape/tests/resources/sentences2.wav" +) diff --git a/docs/griptape-tools/official-tools/src/aws_iam_tool_1.py b/docs/griptape-tools/official-tools/src/aws_iam_tool_1.py new file mode 100644 index 000000000..89718010f --- /dev/null +++ b/docs/griptape-tools/official-tools/src/aws_iam_tool_1.py @@ -0,0 +1,13 @@ +import boto3 + +from griptape.structures import Agent +from griptape.tools import AwsIamTool + +# Initialize the AWS IAM client +aws_iam_client = AwsIamTool(session=boto3.Session()) + +# Create an agent with the AWS IAM client tool +agent = Agent(tools=[aws_iam_client]) + +# Run the agent with a high-level task +agent.run("List all my IAM users") diff --git a/docs/griptape-tools/official-tools/src/aws_s3_tool_1.py b/docs/griptape-tools/official-tools/src/aws_s3_tool_1.py new file mode 100644 index 000000000..3d9425534 --- /dev/null +++ b/docs/griptape-tools/official-tools/src/aws_s3_tool_1.py @@ -0,0 +1,13 @@ +import boto3 + +from griptape.structures import Agent +from griptape.tools import AwsS3Tool + +# Initialize the AWS S3 client +aws_s3_client = AwsS3Tool(session=boto3.Session(), off_prompt=True) + +# Create an agent with the AWS S3 client tool +agent = Agent(tools=[aws_s3_client]) + +# 
Task to list all the AWS S3 buckets +agent.run("List all my S3 buckets.") diff --git a/docs/griptape-tools/official-tools/src/calculator_tool_1.py b/docs/griptape-tools/official-tools/src/calculator_tool_1.py new file mode 100644 index 000000000..1263cad45 --- /dev/null +++ b/docs/griptape-tools/official-tools/src/calculator_tool_1.py @@ -0,0 +1,8 @@ +from griptape.structures import Agent +from griptape.tools import CalculatorTool + +# Create an agent with the CalculatorTool tool +agent = Agent(tools=[CalculatorTool()]) + +# Run the agent with a task to perform the arithmetic calculation of \(10^5\) +agent.run("What is 10 raised to the power of 5?") diff --git a/docs/griptape-tools/official-tools/src/computer_tool_1.py b/docs/griptape-tools/official-tools/src/computer_tool_1.py new file mode 100644 index 000000000..725210020 --- /dev/null +++ b/docs/griptape-tools/official-tools/src/computer_tool_1.py @@ -0,0 +1,10 @@ +from griptape.structures import Agent +from griptape.tools import ComputerTool + +# Initialize the ComputerTool tool +computer = ComputerTool() + +# Create an agent with the ComputerTool tool +agent = Agent(tools=[computer]) + +agent.run("Make 2 files and then list the files in the current directory") diff --git a/docs/griptape-tools/official-tools/src/date_time_tool_1.py b/docs/griptape-tools/official-tools/src/date_time_tool_1.py new file mode 100644 index 000000000..f806e5091 --- /dev/null +++ b/docs/griptape-tools/official-tools/src/date_time_tool_1.py @@ -0,0 +1,8 @@ +from griptape.structures import Agent +from griptape.tools import DateTimeTool + +# Create an agent with the DateTimeTool tool +agent = Agent(tools=[DateTimeTool()]) + +# Fetch the current date and time +agent.run("What is the current date and time?") diff --git a/docs/griptape-tools/official-tools/src/email_tool_1.py b/docs/griptape-tools/official-tools/src/email_tool_1.py new file mode 100644 index 000000000..e9d3b3cee --- /dev/null +++ b/docs/griptape-tools/official-tools/src/email_tool_1.py @@ -0,0 +1,11 @@ +import os + +from griptape.tools import EmailTool + +email_tool = EmailTool( + smtp_host=os.environ.get("SMTP_HOST"), + smtp_port=int(os.environ.get("SMTP_PORT", 465)), + smtp_password=os.environ.get("SMTP_PASSWORD"), + smtp_user=os.environ.get("FROM_EMAIL"), + smtp_use_ssl=bool(os.environ.get("SMTP_USE_SSL")), +) diff --git a/docs/griptape-tools/official-tools/src/extraction_tool_1.py b/docs/griptape-tools/official-tools/src/extraction_tool_1.py new file mode 100644 index 000000000..80e211f03 --- /dev/null +++ b/docs/griptape-tools/official-tools/src/extraction_tool_1.py @@ -0,0 +1,28 @@ +import schema + +from griptape.engines import JsonExtractionEngine +from griptape.structures import Agent +from griptape.tools import ExtractionTool, WebScraperTool + +agent = Agent( + input="Load {{ args[0] }} and extract key info", + tools=[ + WebScraperTool(off_prompt=True), + ExtractionTool( + off_prompt=False, + extraction_engine=JsonExtractionEngine( + template_schema=schema.Schema( + { + "company_name": str, + "industry": str, + schema.Literal( + "product_features", + description="List of key product features.", + ): list[str], + } + ).json_schema("Company Info"), + ), + ), + ], +) +agent.run("https://griptape.ai") diff --git a/docs/griptape-tools/official-tools/src/file_manager_tool_1.py b/docs/griptape-tools/official-tools/src/file_manager_tool_1.py new file mode 100644 index 000000000..0b5596d2b --- /dev/null +++ b/docs/griptape-tools/official-tools/src/file_manager_tool_1.py @@ -0,0 +1,19 @@ +from 
pathlib import Path
+
+from griptape.structures import Agent
+from griptape.tools import FileManagerTool
+
+# Initialize the FileManagerTool tool with the current directory as its base
+file_manager_tool = FileManagerTool()
+
+# Add the tool to the Agent
+agent = Agent(tools=[file_manager_tool])
+
+# Directly create a file named 'sample1.txt' with some content
+filename = "sample1.txt"
+content = "This is the content of sample1.txt"
+
+Path(filename).write_text(content)
+
+# Now, read content from the file 'sample1.txt' using the agent's command
+agent.run("Can you get me the sample1.txt file?")
diff --git a/docs/griptape-tools/official-tools/google-cal-client.md b/docs/griptape-tools/official-tools/src/google_calendar_tool_1.py
similarity index 66%
rename from docs/griptape-tools/official-tools/google-cal-client.md
rename to docs/griptape-tools/official-tools/src/google_calendar_tool_1.py
index 4069d0246..afbb20c9f 100644
--- a/docs/griptape-tools/official-tools/google-cal-client.md
+++ b/docs/griptape-tools/official-tools/src/google_calendar_tool_1.py
@@ -1,15 +1,10 @@
-# GoogleCalendarClient
-
-The GoogleCalendarClient tool allows you to interact with Google Calendar.
-
-
-```python
 import os
-from griptape.tools import GoogleCalendarClient
+
 from griptape.structures import Agent
+from griptape.tools import GoogleCalendarTool
 
-# Create the GoogleCalendarClient tool
-google_calendar_tool = GoogleCalendarClient(
+# Create the GoogleCalendarTool tool
+google_calendar_tool = GoogleCalendarTool(
     service_account_credentials={
         "type": os.environ["GOOGLE_ACCOUNT_TYPE"],
         "project_id": os.environ["GOOGLE_PROJECT_ID"],
@@ -20,19 +15,15 @@
         "auth_uri": "https://accounts.google.com/o/oauth2/auth",
         "token_uri": "https://oauth2.googleapis.com/token",
         "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
-        "client_x509_cert_url": os.environ["GOOGLE_CERT_URL"]
+        "client_x509_cert_url": os.environ["GOOGLE_CERT_URL"],
     },
     owner_email=os.environ["GOOGLE_OWNER_EMAIL"],
-
 )
 
-# Set up an agent using the GoogleCalendarClient tool
-agent = Agent(
-    tools=[google_calendar_tool]
-)
+# Set up an agent using the GoogleCalendarTool tool
+agent = Agent(tools=[google_calendar_tool])
 
 # Task: Get upcoming events from a Google calendar
 agent.run(
     "Get me the details of the next upcoming event from my primary calendar.",
 )
-```
diff --git a/docs/griptape-tools/official-tools/src/google_docs_tool_1.py b/docs/griptape-tools/official-tools/src/google_docs_tool_1.py
new file mode 100644
index 000000000..0d8e8a3cb
--- /dev/null
+++ b/docs/griptape-tools/official-tools/src/google_docs_tool_1.py
@@ -0,0 +1,29 @@
+import os
+
+from griptape.structures import Agent
+from griptape.tools import GoogleDocsTool
+
+# Create the GoogleDocsTool tool
+google_docs_tool = GoogleDocsTool(
+    service_account_credentials={
+        "type": os.environ["GOOGLE_ACCOUNT_TYPE"],
+        "project_id": os.environ["GOOGLE_PROJECT_ID"],
+        "private_key_id": os.environ["GOOGLE_PRIVATE_KEY_ID"],
+        "private_key": os.environ["GOOGLE_PRIVATE_KEY"],
+        "client_email": os.environ["GOOGLE_CLIENT_EMAIL"],
+        "client_id": os.environ["GOOGLE_CLIENT_ID"],
+        "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+        "token_uri": "https://oauth2.googleapis.com/token",
+        "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+        "client_x509_cert_url": os.environ["GOOGLE_CERT_URL"],
+    },
+    owner_email=os.environ["GOOGLE_OWNER_EMAIL"],
+)
+
+# Set up an agent using the GoogleDocsTool tool
+agent = Agent(tools=[google_docs_tool])
+
+# 
Task: Create a new Google Doc and save content to it +agent.run( + "Create doc with name 'test_creation' in test folder with content 'Hey, Tony.", +) diff --git a/docs/griptape-tools/official-tools/src/google_drive_tool_1.py b/docs/griptape-tools/official-tools/src/google_drive_tool_1.py new file mode 100644 index 000000000..d8e43a6db --- /dev/null +++ b/docs/griptape-tools/official-tools/src/google_drive_tool_1.py @@ -0,0 +1,29 @@ +import os + +from griptape.structures import Agent +from griptape.tools import GoogleDriveTool + +# Create the GoogleDriveTool tool +google_drive_tool = GoogleDriveTool( + service_account_credentials={ + "type": os.environ["GOOGLE_ACCOUNT_TYPE"], + "project_id": os.environ["GOOGLE_PROJECT_ID"], + "private_key_id": os.environ["GOOGLE_PRIVATE_KEY_ID"], + "private_key": os.environ["GOOGLE_PRIVATE_KEY"], + "client_email": os.environ["GOOGLE_CLIENT_EMAIL"], + "client_id": os.environ["GOOGLE_CLIENT_ID"], + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": os.environ["GOOGLE_CERT_URL"], + }, + owner_email=os.environ["GOOGLE_OWNER_EMAIL"], +) + +# Set up an agent using the GoogleDriveTool tool +agent = Agent(tools=[google_drive_tool]) + +# Task: Save content to my Google Drive (default directory is root) +agent.run( + "Save the content 'Hi this is Tony' in a filed named 'hello.txt' to my Drive.", +) diff --git a/docs/griptape-tools/official-tools/src/google_gmail_tool_1.py b/docs/griptape-tools/official-tools/src/google_gmail_tool_1.py new file mode 100644 index 000000000..44e0ceb39 --- /dev/null +++ b/docs/griptape-tools/official-tools/src/google_gmail_tool_1.py @@ -0,0 +1,30 @@ +import os + +from griptape.structures import Agent +from griptape.tools import GoogleGmailTool + +# Create the GoogleGmailTool tool +gmail_tool = GoogleGmailTool( + service_account_credentials={ + "type": os.environ["GOOGLE_ACCOUNT_TYPE"], + "project_id": os.environ["GOOGLE_PROJECT_ID"], + "private_key_id": os.environ["GOOGLE_PRIVATE_KEY_ID"], + "private_key": os.environ["GOOGLE_PRIVATE_KEY"], + "client_email": os.environ["GOOGLE_CLIENT_EMAIL"], + "client_id": os.environ["GOOGLE_CLIENT_ID"], + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": os.environ["GOOGLE_CERT_URL"], + }, + owner_email=os.environ["GOOGLE_OWNER_EMAIL"], +) + +# Set up an agent using the GoogleGmailTool tool +agent = Agent(tools=[gmail_tool]) + +# Task: Create a draft email in GMail +agent.run( + "Create a draft email in Gmail to example@email.com with the subject 'Test Draft', the body " + "'This is a test draft email.'", +) diff --git a/docs/griptape-tools/official-tools/src/griptape_cloud_knowledge_base_tool_1.py b/docs/griptape-tools/official-tools/src/griptape_cloud_knowledge_base_tool_1.py new file mode 100644 index 000000000..b8c294f6b --- /dev/null +++ b/docs/griptape-tools/official-tools/src/griptape_cloud_knowledge_base_tool_1.py @@ -0,0 +1,18 @@ +import os + +from griptape.structures import Agent +from griptape.tools import GriptapeCloudKnowledgeBaseTool + +knowledge_base_client = GriptapeCloudKnowledgeBaseTool( + description="Contains information about the company and its operations", + api_key=os.environ["GRIPTAPE_CLOUD_API_KEY"], + 
knowledge_base_id=os.environ["GRIPTAPE_CLOUD_KB_ID"], +) + +agent = Agent( + tools=[ + knowledge_base_client, + ] +) + +agent.run("What is the company's corporate travel policy?") diff --git a/docs/griptape-tools/official-tools/image-query-client.md b/docs/griptape-tools/official-tools/src/image_query_tool_1.py similarity index 59% rename from docs/griptape-tools/official-tools/image-query-client.md rename to docs/griptape-tools/official-tools/src/image_query_tool_1.py index a1044fb91..a4d69eafb 100644 --- a/docs/griptape-tools/official-tools/image-query-client.md +++ b/docs/griptape-tools/official-tools/src/image_query_tool_1.py @@ -1,28 +1,20 @@ -# ImageQueryClient - -This tool allows Agents to execute natural language queries on the contents of images using multimodal models. - -```python -from griptape.structures import Agent -from griptape.tools import ImageQueryClient from griptape.drivers import OpenAiImageQueryDriver from griptape.engines import ImageQueryEngine +from griptape.structures import Agent +from griptape.tools import ImageQueryTool # Create an Image Query Driver. -driver = OpenAiImageQueryDriver( - model="gpt-4o" -) +driver = OpenAiImageQueryDriver(model="gpt-4o") # Create an Image Query Engine configured to use the driver. engine = ImageQueryEngine( image_query_driver=driver, ) -# Create an Image Query Client configured to use the engine. -tool = ImageQueryClient( +# Create an Image Query Tool configured to use the engine. +tool = ImageQueryTool( image_query_engine=engine, ) # Create an agent and provide the tool to it. Agent(tools=[tool]).run("Describe the weather in the image tests/resources/mountain.png in one word.") -``` diff --git a/docs/griptape-tools/official-tools/src/inpainting_image_generation_tool_1.py b/docs/griptape-tools/official-tools/src/inpainting_image_generation_tool_1.py new file mode 100644 index 000000000..5821e1b40 --- /dev/null +++ b/docs/griptape-tools/official-tools/src/inpainting_image_generation_tool_1.py @@ -0,0 +1,26 @@ +from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver +from griptape.engines import InpaintingImageGenerationEngine +from griptape.structures import Agent +from griptape.tools import InpaintingImageGenerationTool + +# Create a driver configured to use Stable Diffusion via Bedrock. +driver = AmazonBedrockImageGenerationDriver( + image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), + model="stability.stable-diffusion-xl-v0", +) + +# Create an engine configured to use the driver. +engine = InpaintingImageGenerationEngine( + image_generation_driver=driver, +) + +# Create a tool configured to use the engine. +tool = InpaintingImageGenerationTool( + engine=engine, +) + +# Create an agent and provide the tool to it. +Agent(tools=[tool]).run( + "Generate an image of a castle built into the side of a mountain by inpainting the " + "image at tests/resources/mountain.png using the mask at tests/resources/mountain-mask.png." 
+) diff --git a/docs/griptape-tools/official-tools/src/openweather_tool_1.py b/docs/griptape-tools/official-tools/src/openweather_tool_1.py new file mode 100644 index 000000000..b592620fa --- /dev/null +++ b/docs/griptape-tools/official-tools/src/openweather_tool_1.py @@ -0,0 +1,14 @@ +import os + +from griptape.structures import Agent +from griptape.tools import OpenWeatherTool + +agent = Agent( + tools=[ + OpenWeatherTool( + api_key=os.environ["OPENWEATHER_API_KEY"], + ), + ] +) + +agent.run("What's the weather currently like in San Francisco?") diff --git a/docs/griptape-tools/official-tools/src/outpainting_image_generation_tool_1.py b/docs/griptape-tools/official-tools/src/outpainting_image_generation_tool_1.py new file mode 100644 index 000000000..79606a965 --- /dev/null +++ b/docs/griptape-tools/official-tools/src/outpainting_image_generation_tool_1.py @@ -0,0 +1,26 @@ +from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver +from griptape.engines import OutpaintingImageGenerationEngine +from griptape.structures import Agent +from griptape.tools import OutpaintingImageGenerationTool + +# Create a driver configured to use Stable Diffusion via Bedrock. +driver = AmazonBedrockImageGenerationDriver( + image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver(), + model="stability.stable-diffusion-xl-v0", +) + +# Create an engine configured to use the driver. +engine = OutpaintingImageGenerationEngine( + image_generation_driver=driver, +) + +# Create a tool configured to use the engine. +tool = OutpaintingImageGenerationTool( + engine=engine, +) + +# Create an agent and provide the tool to it. +Agent(tools=[tool]).run( + "Generate an image of a mountain shrouded by clouds by outpainting the " + "image at tests/resources/mountain.png using the mask at tests/resources/mountain-mask.png." +) diff --git a/docs/griptape-tools/official-tools/prompt-image-generation-client.md b/docs/griptape-tools/official-tools/src/prompt_image_generation_tool_1.py similarity index 73% rename from docs/griptape-tools/official-tools/prompt-image-generation-client.md rename to docs/griptape-tools/official-tools/src/prompt_image_generation_tool_1.py index b2045bdd5..0173cc185 100644 --- a/docs/griptape-tools/official-tools/prompt-image-generation-client.md +++ b/docs/griptape-tools/official-tools/src/prompt_image_generation_tool_1.py @@ -1,14 +1,7 @@ -# PromptImageGenerationClient - -This tool allows LLMs to generate images from a text prompt. - -```python -from griptape.structures import Agent +from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver from griptape.engines import PromptImageGenerationEngine -from griptape.drivers import AmazonBedrockImageGenerationDriver, \ - BedrockStableDiffusionImageGenerationModelDriver -from griptape.tools import PromptImageGenerationClient - +from griptape.structures import Agent +from griptape.tools import PromptImageGenerationTool # Create a driver configured to use Stable Diffusion via Bedrock. driver = AmazonBedrockImageGenerationDriver( @@ -22,10 +15,9 @@ ) # Create a tool configured to use the engine. -tool = PromptImageGenerationClient( +tool = PromptImageGenerationTool( engine=engine, ) # Create an agent and provide the tool to it. 
Agent(tools=[tool]).run("Generate an image of a mountain on a summer day.") -``` diff --git a/docs/griptape-tools/official-tools/src/prompt_summary_tool_1.py b/docs/griptape-tools/official-tools/src/prompt_summary_tool_1.py new file mode 100644 index 000000000..3b4846439 --- /dev/null +++ b/docs/griptape-tools/official-tools/src/prompt_summary_tool_1.py @@ -0,0 +1,8 @@ +from griptape.structures import Agent +from griptape.tools import PromptSummaryTool, WebScraperTool + +agent = Agent(tools=[WebScraperTool(off_prompt=True), PromptSummaryTool()]) + +agent.run( + "How can I build Neovim from source for MacOS according to this https://github.com/neovim/neovim/blob/master/BUILD.md" +) diff --git a/docs/griptape-tools/official-tools/src/query_tool_1.py b/docs/griptape-tools/official-tools/src/query_tool_1.py new file mode 100644 index 000000000..0c612eee9 --- /dev/null +++ b/docs/griptape-tools/official-tools/src/query_tool_1.py @@ -0,0 +1,6 @@ +from griptape.structures import Agent +from griptape.tools import QueryTool, WebScraperTool + +agent = Agent(tools=[WebScraperTool(off_prompt=True), QueryTool()]) + +agent.run("Tell me about the architecture as described here: https://neovim.io/doc/user/vim_diff.html") diff --git a/docs/griptape-tools/official-tools/src/rag_tool_1.py b/docs/griptape-tools/official-tools/src/rag_tool_1.py new file mode 100644 index 000000000..7cefd065b --- /dev/null +++ b/docs/griptape-tools/official-tools/src/rag_tool_1.py @@ -0,0 +1,37 @@ +from griptape.artifacts import TextArtifact +from griptape.drivers import LocalVectorStoreDriver, OpenAiChatPromptDriver, OpenAiEmbeddingDriver +from griptape.engines.rag import RagEngine +from griptape.engines.rag.modules import PromptResponseRagModule, VectorStoreRetrievalRagModule +from griptape.engines.rag.stages import ResponseRagStage, RetrievalRagStage +from griptape.structures import Agent +from griptape.tools import RagTool + +vector_store_driver = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver()) + +artifact = TextArtifact( + "Griptape builds AI-powered applications that connect securely to your enterprise data and APIs." + "Griptape Agents provide incredible power and flexibility when working with large language models." +) + +vector_store_driver.upsert_text_artifact(artifact=artifact, namespace="griptape") + +rag_tool = RagTool( + description="Contains information about Griptape", + off_prompt=False, + rag_engine=RagEngine( + retrieval_stage=RetrievalRagStage( + retrieval_modules=[ + VectorStoreRetrievalRagModule( + vector_store_driver=vector_store_driver, query_params={"namespace": "griptape", "top_n": 20} + ) + ] + ), + response_stage=ResponseRagStage( + response_modules=[PromptResponseRagModule(prompt_driver=OpenAiChatPromptDriver(model="gpt-4o"))] + ), + ), +) + +agent = Agent(tools=[rag_tool]) + +agent.run("what is Griptape?") diff --git a/docs/griptape-tools/official-tools/rest-api-client.md b/docs/griptape-tools/official-tools/src/rest_api_tool_1.py similarity index 87% rename from docs/griptape-tools/official-tools/rest-api-client.md rename to docs/griptape-tools/official-tools/src/rest_api_tool_1.py index 07ddccf86..3f6b3b663 100644 --- a/docs/griptape-tools/official-tools/rest-api-client.md +++ b/docs/griptape-tools/official-tools/src/rest_api_tool_1.py @@ -1,22 +1,18 @@ -# RestApiClient - -This tool enables LLMs to call REST APIs. 
- -The [RestApiClient](../../reference/griptape/tools/rest_api_client/tool.md) tool uses the following parameters: - -### Example -The following example is built using [https://jsonplaceholder.typicode.com/guide/](https://jsonplaceholder.typicode.com/guide/). - -```python from json import dumps + +from griptape.configs import Defaults +from griptape.configs.drivers import DriversConfig from griptape.drivers import OpenAiChatPromptDriver from griptape.memory.structure import ConversationMemory from griptape.structures import Pipeline from griptape.tasks import ToolkitTask -from griptape.tools import RestApiClient -from griptape.config import StructureConfig +from griptape.tools import RestApiTool -posts_client = RestApiClient( +Defaults.drivers_config = DriversConfig( + prompt_driver=OpenAiChatPromptDriver(model="gpt-4o", temperature=0.1), +) + +posts_client = RestApiTool( base_url="https://jsonplaceholder.typicode.com", path="posts", description="Allows for creating, updating, deleting, patching, and getting posts.", @@ -117,12 +113,6 @@ pipeline = Pipeline( conversation_memory=ConversationMemory(), - config = StructureConfig( - prompt_driver=OpenAiChatPromptDriver( - model="gpt-4o", - temperature=0.1 - ), - ), ) pipeline.add_tasks( @@ -153,4 +143,3 @@ ) pipeline.run() -``` diff --git a/docs/griptape-tools/official-tools/src/sql_tool_1.py b/docs/griptape-tools/official-tools/src/sql_tool_1.py new file mode 100644 index 000000000..f7630891f --- /dev/null +++ b/docs/griptape-tools/official-tools/src/sql_tool_1.py @@ -0,0 +1,28 @@ +import os + +import boto3 + +from griptape.drivers import AmazonRedshiftSqlDriver +from griptape.loaders import SqlLoader +from griptape.structures import Agent +from griptape.tools import SqlTool + +session = boto3.Session() + +sql_loader = SqlLoader( + sql_driver=AmazonRedshiftSqlDriver( + database=os.environ["REDSHIFT_DATABASE"], + session=session, + cluster_identifier=os.environ["REDSHIFT_CLUSTER_IDENTIFIER"], + ) +) + +sql_tool = SqlTool( + sql_loader=sql_loader, + table_name="people", + table_description="contains information about tech industry professionals", + engine_name="redshift", +) + +agent = Agent(tools=[sql_tool]) +agent.run("SELECT * FROM people;") diff --git a/docs/griptape-tools/official-tools/src/structure_run_tool_1.py b/docs/griptape-tools/official-tools/src/structure_run_tool_1.py new file mode 100644 index 000000000..575092ce6 --- /dev/null +++ b/docs/griptape-tools/official-tools/src/structure_run_tool_1.py @@ -0,0 +1,24 @@ +import os + +from griptape.drivers import GriptapeCloudStructureRunDriver +from griptape.structures import Agent +from griptape.tools import StructureRunTool + +base_url = os.environ["GRIPTAPE_CLOUD_BASE_URL"] +api_key = os.environ["GRIPTAPE_CLOUD_API_KEY"] +structure_id = os.environ["GRIPTAPE_CLOUD_STRUCTURE_ID"] + +structure_run_tool = StructureRunTool( + description="RAG Expert Agent - Structure to invoke with natural language queries about the topic of Retrieval Augmented Generation", + driver=GriptapeCloudStructureRunDriver( + base_url=base_url, + api_key=api_key, + structure_id=structure_id, + ), +) + +# Set up an agent using the StructureRunTool tool +agent = Agent(tools=[structure_run_tool]) + +# Task: Ask the Griptape Cloud Hosted Structure about modular RAG +agent.run("what is modular RAG?") diff --git a/docs/griptape-tools/official-tools/src/text_to_speech_tool_1.py b/docs/griptape-tools/official-tools/src/text_to_speech_tool_1.py new file mode 100644 index 000000000..376113d63 --- /dev/null +++ 
b/docs/griptape-tools/official-tools/src/text_to_speech_tool_1.py @@ -0,0 +1,20 @@ +import os + +from griptape.drivers import ElevenLabsTextToSpeechDriver +from griptape.engines import TextToSpeechEngine +from griptape.structures import Agent +from griptape.tools.text_to_speech.tool import TextToSpeechTool + +driver = ElevenLabsTextToSpeechDriver( + api_key=os.environ["ELEVEN_LABS_API_KEY"], + model="eleven_multilingual_v2", + voice="Matilda", +) + +tool = TextToSpeechTool( + engine=TextToSpeechEngine( + text_to_speech_driver=driver, + ), +) + +Agent(tools=[tool]).run("Generate audio from this text: 'Hello, world!'") diff --git a/docs/griptape-tools/official-tools/src/variation_image_generation_tool_1.py b/docs/griptape-tools/official-tools/src/variation_image_generation_tool_1.py new file mode 100644 index 000000000..209d97a7b --- /dev/null +++ b/docs/griptape-tools/official-tools/src/variation_image_generation_tool_1.py @@ -0,0 +1,27 @@ +from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver +from griptape.engines import VariationImageGenerationEngine +from griptape.structures import Agent +from griptape.tools import VariationImageGenerationTool + +# Create a driver configured to use Stable Diffusion via Bedrock. +driver = AmazonBedrockImageGenerationDriver( + image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver( + style_preset="pixel-art", + ), + model="stability.stable-diffusion-xl-v0", +) + +# Create an engine configured to use the driver. +engine = VariationImageGenerationEngine( + image_generation_driver=driver, +) + +# Create a tool configured to use the engine. +tool = VariationImageGenerationTool( + engine=engine, +) + +# Create an agent and provide the tool to it. +Agent(tools=[tool]).run( + "Generate a variation of the image located at tests/resources/mountain.png " "depicting a mountain on a winter day" +) diff --git a/docs/griptape-tools/official-tools/src/variation_image_generation_tool_2.py b/docs/griptape-tools/official-tools/src/variation_image_generation_tool_2.py new file mode 100644 index 000000000..036b75d48 --- /dev/null +++ b/docs/griptape-tools/official-tools/src/variation_image_generation_tool_2.py @@ -0,0 +1,42 @@ +from griptape.drivers import AmazonBedrockImageGenerationDriver, BedrockStableDiffusionImageGenerationModelDriver +from griptape.engines import PromptImageGenerationEngine, VariationImageGenerationEngine +from griptape.structures import Agent +from griptape.tools import PromptImageGenerationTool, VariationImageGenerationTool + +# Create a driver configured to use Stable Diffusion via Bedrock. +driver = AmazonBedrockImageGenerationDriver( + image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver( + style_preset="pixel-art", + ), + model="stability.stable-diffusion-xl-v0", +) + +# Create an prompt image generation engine configured to use the driver. +prompt_engine = PromptImageGenerationEngine( + image_generation_driver=driver, +) + +# Create a prompt image generation client configured to use the engine. +prompt_tool = PromptImageGenerationTool( + engine=prompt_engine, +) + +# Create an variation image generation engine configured to use the driver. +variation_engine = VariationImageGenerationEngine( + image_generation_driver=driver, +) + +# Create a variation image generation client configured to use the engine. +variation_tool = VariationImageGenerationTool( + engine=variation_engine, +) + +# Create an agent and provide the tools to it. 
+agent = Agent(tools=[prompt_tool, variation_tool]) + +# Run the agent using a prompt motivating it to generate an image, then +# create a variation of the image present in task memory. +agent.run( + "Generate an image of a mountain on a summer day. Then, generate a " + "variation of this image depicting the same mountain scene on a winter day." +) diff --git a/docs/griptape-tools/official-tools/src/vector_store_tool_1.py b/docs/griptape-tools/official-tools/src/vector_store_tool_1.py new file mode 100644 index 000000000..266398d5e --- /dev/null +++ b/docs/griptape-tools/official-tools/src/vector_store_tool_1.py @@ -0,0 +1,25 @@ +from griptape.artifacts.error_artifact import ErrorArtifact +from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver +from griptape.loaders import WebLoader +from griptape.structures import Agent +from griptape.tools import PromptSummaryTool, VectorStoreTool + +vector_store_driver = LocalVectorStoreDriver( + embedding_driver=OpenAiEmbeddingDriver(), +) + +artifacts = WebLoader().load("https://www.griptape.ai") +if isinstance(artifacts, ErrorArtifact): + raise Exception(artifacts.value) + +vector_store_driver.upsert_text_artifacts({"griptape": artifacts}) +vector_db = VectorStoreTool( + description="This DB has information about the Griptape Python framework", + vector_store_driver=vector_store_driver, + query_params={"namespace": "griptape"}, + off_prompt=True, +) + +agent = Agent(tools=[vector_db, PromptSummaryTool()]) + +agent.run("what is Griptape?") diff --git a/docs/griptape-tools/official-tools/src/web_scraper_1.py b/docs/griptape-tools/official-tools/src/web_scraper_1.py new file mode 100644 index 000000000..0e6f55011 --- /dev/null +++ b/docs/griptape-tools/official-tools/src/web_scraper_1.py @@ -0,0 +1,6 @@ +from griptape.structures import Agent +from griptape.tools import PromptSummaryTool, WebScraperTool + +agent = Agent(tools=[WebScraperTool(off_prompt=True), PromptSummaryTool(off_prompt=False)]) + +agent.run("Based on https://www.griptape.ai/, tell me what griptape is") diff --git a/docs/griptape-tools/official-tools/src/web_scraper_tool_1.py b/docs/griptape-tools/official-tools/src/web_scraper_tool_1.py new file mode 100644 index 000000000..0e6f55011 --- /dev/null +++ b/docs/griptape-tools/official-tools/src/web_scraper_tool_1.py @@ -0,0 +1,6 @@ +from griptape.structures import Agent +from griptape.tools import PromptSummaryTool, WebScraperTool + +agent = Agent(tools=[WebScraperTool(off_prompt=True), PromptSummaryTool(off_prompt=False)]) + +agent.run("Based on https://www.griptape.ai/, tell me what griptape is") diff --git a/docs/griptape-tools/official-tools/src/web_search_tool_1.py b/docs/griptape-tools/official-tools/src/web_search_tool_1.py new file mode 100644 index 000000000..3469ad7f9 --- /dev/null +++ b/docs/griptape-tools/official-tools/src/web_search_tool_1.py @@ -0,0 +1,22 @@ +import os + +from griptape.drivers import GoogleWebSearchDriver +from griptape.structures import Agent +from griptape.tools import WebSearchTool + +# Initialize the WebSearchTool tool with necessary parameters +web_search_tool = WebSearchTool( + web_search_driver=GoogleWebSearchDriver( + api_key=os.environ["GOOGLE_API_KEY"], + search_id=os.environ["GOOGLE_API_SEARCH_ID"], + results_count=5, + language="en", + country="us", + ), +) + +# Set up an agent using the WebSearchTool tool +agent = Agent(tools=[web_search_tool]) + +# Task: Search the web for a specific query +agent.run("Tell me how photosynthesis works") diff --git 
a/docs/griptape-tools/official-tools/src/web_search_tool_2.py b/docs/griptape-tools/official-tools/src/web_search_tool_2.py new file mode 100644 index 000000000..dd9c32655 --- /dev/null +++ b/docs/griptape-tools/official-tools/src/web_search_tool_2.py @@ -0,0 +1,28 @@ +import os + +import schema + +from griptape.drivers import GoogleWebSearchDriver +from griptape.structures import Agent +from griptape.tools import WebSearchTool + +agent = Agent( + tools=[ + WebSearchTool( + web_search_driver=GoogleWebSearchDriver( + api_key=os.environ["GOOGLE_API_KEY"], + search_id=os.environ["GOOGLE_API_SEARCH_ID"], + ), + extra_schema_properties={ + "search": { + schema.Literal( + "sort", + description="Date range to search within. Format: date:r:YYYYMMDD:YYYYMMDD", + ): str + } + }, + ) + ], +) + +agent.run("Search for articles about the history of the internet from 1990 to 2000") diff --git a/docs/griptape-tools/official-tools/structure-run-client.md b/docs/griptape-tools/official-tools/structure-run-tool.md similarity index 74% rename from docs/griptape-tools/official-tools/structure-run-client.md rename to docs/griptape-tools/official-tools/structure-run-tool.md index 0907fa06a..7b73e5b52 100644 --- a/docs/griptape-tools/official-tools/structure-run-client.md +++ b/docs/griptape-tools/official-tools/structure-run-tool.md @@ -1,43 +1,20 @@ -# StructureRunClient +# Structure Run Tool -The StructureRunClient Tool provides a way to run Structures via a Tool. +The [StructureRunTool](../../reference/griptape/tools/structure_run/tool.md) Tool provides a way to run Structures via a Tool. It requires you to provide a [Structure Run Driver](../../griptape-framework/drivers/structure-run-drivers.md) to run the Structure in the desired environment. ```python -import os - -from griptape.drivers import GriptapeCloudStructureRunDriver -from griptape.structures import Agent -from griptape.tools import StructureRunClient - -base_url = os.environ["GRIPTAPE_CLOUD_BASE_URL"] -api_key = os.environ["GRIPTAPE_CLOUD_API_KEY"] -structure_id = os.environ["GRIPTAPE_CLOUD_STRUCTURE_ID"] - -structure_run_tool = StructureRunClient( - description="RAG Expert Agent - Structure to invoke with natural language queries about the topic of Retrieval Augmented Generation", - driver=GriptapeCloudStructureRunDriver( - base_url=base_url, - api_key=api_key, - structure_id=structure_id, - ), -) - -# Set up an agent using the StructureRunClient tool -agent = Agent(tools=[structure_run_tool]) - -# Task: Ask the Griptape Cloud Hosted Structure about modular RAG -agent.run("what is modular RAG?") +--8<-- "docs/griptape-tools/official-tools/src/structure_run_tool_1.py" ``` ``` [05/02/24 13:50:03] INFO ToolkitTask 4e9458375bda4fbcadb77a94624ed64c Input: what is modular RAG? [05/02/24 13:50:10] INFO Subtask 5ef2d72028fc495aa7faf6f46825b004 - Thought: To answer this question, I need to run a search for the term "modular RAG". I will use the StructureRunClient action to execute a + Thought: To answer this question, I need to run a search for the term "modular RAG". I will use the StructureRunTool action to execute a search structure. 
Actions: [ { - "name": "StructureRunClient", + "name": "StructureRunTool", "path": "run_structure", "input": { "values": { diff --git a/docs/griptape-tools/official-tools/task-memory-client.md b/docs/griptape-tools/official-tools/task-memory-client.md deleted file mode 100644 index f91bee39a..000000000 --- a/docs/griptape-tools/official-tools/task-memory-client.md +++ /dev/null @@ -1,11 +0,0 @@ -# TaskMemoryClient - -This tool enables LLMs to query and summarize task outputs that are stored in short-term tool memory. This tool uniquely requires the user to set the `off_prompt` property explicitly for usability reasons (Griptape doesn't provide the default `True` value). - -```python -from griptape.structures import Agent -from griptape.tools import WebScraper, TaskMemoryClient - - -Agent(tools=[WebScraper(off_prompt=True), TaskMemoryClient(off_prompt=False)]) -``` diff --git a/docs/griptape-tools/official-tools/text-to-speech-client.md b/docs/griptape-tools/official-tools/text-to-speech-client.md deleted file mode 100644 index 622b5bf3a..000000000 --- a/docs/griptape-tools/official-tools/text-to-speech-client.md +++ /dev/null @@ -1,27 +0,0 @@ -# TextToSpeechClient - -This Tool enables LLMs to synthesize speech from text using [Text to Speech Engines](../../reference/griptape/engines/audio/text_to_speech_engine.md) and [Text to Speech Drivers](../../reference/griptape/drivers/text_to_speech/index.md). - -```python -import os - -from griptape.drivers import ElevenLabsTextToSpeechDriver -from griptape.engines import TextToSpeechEngine -from griptape.tools.text_to_speech_client.tool import TextToSpeechClient -from griptape.structures import Agent - - -driver = ElevenLabsTextToSpeechDriver( - api_key=os.getenv("ELEVEN_LABS_API_KEY"), - model="eleven_multilingual_v2", - voice="Matilda", -) - -tool = TextToSpeechClient( - engine=TextToSpeechEngine( - text_to_speech_driver=driver, - ), -) - -Agent(tools=[tool]).run("Generate audio from this text: 'Hello, world!'") -``` \ No newline at end of file diff --git a/docs/griptape-tools/official-tools/text-to-speech-tool.md b/docs/griptape-tools/official-tools/text-to-speech-tool.md new file mode 100644 index 000000000..ac3f54f8e --- /dev/null +++ b/docs/griptape-tools/official-tools/text-to-speech-tool.md @@ -0,0 +1,7 @@ +# Text To Speech Tool + +This Tool enables LLMs to synthesize speech from text using [Text to Speech Engines](../../reference/griptape/engines/audio/text_to_speech_engine.md) and [Text to Speech Drivers](../../reference/griptape/drivers/text_to_speech/index.md). + +```python +--8<-- "docs/griptape-tools/official-tools/src/text_to_speech_tool_1.py" +``` diff --git a/docs/griptape-tools/official-tools/variation-image-generation-client.md b/docs/griptape-tools/official-tools/variation-image-generation-client.md deleted file mode 100644 index a9f703b57..000000000 --- a/docs/griptape-tools/official-tools/variation-image-generation-client.md +++ /dev/null @@ -1,83 +0,0 @@ -# VariationImageGenerationEngine - -This Tool allows LLMs to generate variations of an input image from a text prompt. The input image can be provided either by its file path or by its [Task Memory](../../griptape-framework/structures/task-memory.md) reference. 
- -## Referencing an Image by File Path - -```python -from griptape.structures import Agent -from griptape.engines import VariationImageGenerationEngine -from griptape.drivers import AmazonBedrockImageGenerationDriver, \ - BedrockStableDiffusionImageGenerationModelDriver -from griptape.tools import VariationImageGenerationClient - - -# Create a driver configured to use Stable Diffusion via Bedrock. -driver = AmazonBedrockImageGenerationDriver( - image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver( - style_preset="pixel-art", - ), - model="stability.stable-diffusion-xl-v0", -) - -# Create an engine configured to use the driver. -engine = VariationImageGenerationEngine( - image_generation_driver=driver, -) - -# Create a tool configured to use the engine. -tool = VariationImageGenerationClient( - engine=engine, -) - -# Create an agent and provide the tool to it. -Agent(tools=[tool]).run("Generate a variation of the image located at tests/resources/mountain.png " - "depicting a mountain on a winter day") -``` - -## Referencing an Image in Task Memory - -```python -from griptape.structures import Agent -from griptape.engines import VariationImageGenerationEngine, PromptImageGenerationEngine -from griptape.drivers import AmazonBedrockImageGenerationDriver, \ - BedrockStableDiffusionImageGenerationModelDriver -from griptape.tools import VariationImageGenerationClient, PromptImageGenerationClient - - -# Create a driver configured to use Stable Diffusion via Bedrock. -driver = AmazonBedrockImageGenerationDriver( - image_generation_model_driver=BedrockStableDiffusionImageGenerationModelDriver( - style_preset="pixel-art", - ), - model="stability.stable-diffusion-xl-v0", -) - -# Create an prompt image generation engine configured to use the driver. -prompt_engine = PromptImageGenerationEngine( - image_generation_driver=driver, -) - -# Create a prompt image generation client configured to use the engine. -prompt_tool = PromptImageGenerationClient( - engine=prompt_engine, -) - -# Create an variation image generation engine configured to use the driver. -variation_engine = VariationImageGenerationEngine( - image_generation_driver=driver, -) - -# Create a variation image generation client configured to use the engine. -variation_tool = VariationImageGenerationClient( - engine=variation_engine, -) - -# Create an agent and provide the tools to it. -agent = Agent(tools=[prompt_tool, variation_tool]) - -# Run the agent using a prompt motivating it to generate an image, then -# create a variation of the image present in task memory. -agent.run("Generate an image of a mountain on a summer day. Then, generate a " - "variation of this image depicting the same mountain scene on a winter day.") -``` diff --git a/docs/griptape-tools/official-tools/variation-image-generation-tool.md b/docs/griptape-tools/official-tools/variation-image-generation-tool.md new file mode 100644 index 000000000..bcc8c3f61 --- /dev/null +++ b/docs/griptape-tools/official-tools/variation-image-generation-tool.md @@ -0,0 +1,15 @@ +# Variation Image Generation Engine Tool + +This Tool allows LLMs to generate variations of an input image from a text prompt. The input image can be provided either by its file path or by its [Task Memory](../../griptape-framework/structures/task-memory.md) reference. 
+
+## Referencing an Image by File Path
+
+```python
+--8<-- "docs/griptape-tools/official-tools/src/variation_image_generation_tool_1.py"
+```
+
+## Referencing an Image in Task Memory
+
+```python
+--8<-- "docs/griptape-tools/official-tools/src/variation_image_generation_tool_2.py"
+```
diff --git a/docs/griptape-tools/official-tools/vector-store-client.md b/docs/griptape-tools/official-tools/vector-store-client.md
deleted file mode 100644
index f3cab2065..000000000
--- a/docs/griptape-tools/official-tools/vector-store-client.md
+++ /dev/null
@@ -1,35 +0,0 @@
-The [VectorStoreClient](../../reference/griptape/tools/vector_store_client/tool.md) enables LLMs to query vector stores.
-
-Here is an example of how it can be used with a local vector store driver:
-
-```python
-from griptape.structures import Agent
-from griptape.tools import VectorStoreClient, TaskMemoryClient
-from griptape.loaders import WebLoader
-from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver
-
-vector_store_driver = LocalVectorStoreDriver(
-    embedding_driver=OpenAiEmbeddingDriver(),
-)
-
-vector_store_driver.upsert_text_artifacts(
-    {
-        "griptape": WebLoader().load("https://www.griptape.ai")
-    }
-)
-
-vector_db = VectorStoreClient(
-    description="This DB has information about the Griptape Python framework",
-    vector_store_driver=vector_store_driver,
-    query_params={"namespace": "griptape"},
-    off_prompt=True
-)
-
-agent = Agent(
-    tools=[vector_db, TaskMemoryClient(off_prompt=False)]
-)
-
-agent.run(
-    "what is Griptape?"
-)
-```
diff --git a/docs/griptape-tools/official-tools/vector-store-tool.md b/docs/griptape-tools/official-tools/vector-store-tool.md
new file mode 100644
index 000000000..7317c25db
--- /dev/null
+++ b/docs/griptape-tools/official-tools/vector-store-tool.md
@@ -0,0 +1,9 @@
+# Vector Store Tool
+
+The [VectorStoreTool](../../reference/griptape/tools/vector_store/tool.md) enables LLMs to query vector stores.
+
+Here is an example of how it can be used with a local vector store driver:
+
+```python
+--8<-- "docs/griptape-tools/official-tools/src/vector_store_tool_1.py"
+```
diff --git a/docs/griptape-tools/official-tools/web-scraper-tool.md b/docs/griptape-tools/official-tools/web-scraper-tool.md
new file mode 100644
index 000000000..26b83f6e3
--- /dev/null
+++ b/docs/griptape-tools/official-tools/web-scraper-tool.md
@@ -0,0 +1,97 @@
+# Web Scraper Tool
+
+This tool enables LLMs to scrape web pages for full text, summaries, authors, titles, and keywords. It can also execute search queries to answer specific questions about the page. This tool uses OpenAI APIs for some of its activities, so in order to use it provide a valid API key in `openai_api_key`.
+ +```python +--8<-- "docs/griptape-tools/official-tools/src/web_scraper_tool_1.py" +``` +``` +[08/12/24 15:32:08] INFO ToolkitTask b14a4305365f4b17a4dcf235f84397e2 + Input: Based on https://www.griptape.ai/, tell me what griptape is +[08/12/24 15:32:10] INFO Subtask bf396977ea634eb28f55388d3f828f5d + Actions: [ + { + "tag": "call_ExEzJDZuBfnsa9pZMSr6mtsS", + "name": "WebScraperTool", + "path": "get_content", + "input": { + "values": { + "url": "https://www.griptape.ai/" + } + } + } + ] + INFO Subtask bf396977ea634eb28f55388d3f828f5d + Response: Output of "WebScraperTool.get_content" was stored in memory with memory_name "TaskMemory" and artifact_namespace + "a55c85bf1aa944d5b69bbe8d61382179" +[08/12/24 15:32:11] INFO Subtask 31852039bd274b71bf46feaf22b68112 + Actions: [ + { + "tag": "call_6Dovx2GKE2GLjaYIuwXvBxVn", + "name": "PromptSummaryTool", + "path": "summarize", + "input": { + "values": { + "summary": { + "memory_name": "TaskMemory", + "artifact_namespace": "a55c85bf1aa944d5b69bbe8d61382179" + } + } + } + } + ] +[08/12/24 15:32:15] INFO Subtask 31852039bd274b71bf46feaf22b68112 + Response: Griptape offers a comprehensive solution for building, deploying, and scaling AI applications in the cloud. It provides developers + with a framework and cloud services to create retrieval-driven AI-powered applications without needing extensive knowledge in AI or prompt + engineering. + + **Griptape Framework:** + - Enables developers to build AI applications using Python. + - Offers better security, performance, and cost-efficiency with Off-Prompt™ technology. + - Facilitates the creation of Gen AI Agents, Systems of Agents, Pipelines, Workflows, and RAG implementations. + + **Griptape Cloud:** + - Simplifies deployment and execution of ETL, RAG, and other structures. + - Provides API abstractions and eliminates the need for infrastructure management. + - Supports seamless scaling to accommodate growing workloads. + + **Solutions & Applications:** + - Custom project development. + - Turnkey SaaS offerings for non-tech businesses. + - Ready-made apps and options to offer apps to customers. + + **Key Features:** + - Automated Data Prep (ETL): Connect, extract, transform, and load data into a vector database index. + - Retrieval as a Service (RAG): Generate answers, summaries, and details from your data using customizable retrieval patterns. + - Structure Runtime (RUN): Build and integrate AI agents, pipelines, and workflows into client applications. +[08/12/24 15:32:21] INFO ToolkitTask b14a4305365f4b17a4dcf235f84397e2 + Output: Griptape is a comprehensive solution designed to facilitate the building, deploying, and scaling of AI applications in the cloud. It + provides developers with a framework and cloud services that simplify the creation of retrieval-driven AI-powered applications, even for those + without extensive AI or prompt engineering expertise. + + ### Key Components of Griptape: + + 1. **Griptape Framework:** + - **Development:** Allows developers to build AI applications using Python. + - **Technology:** Utilizes Off-Prompt™ technology for enhanced security, performance, and cost-efficiency. + - **Capabilities:** Supports the creation of Gen AI Agents, Systems of Agents, Pipelines, Workflows, and Retrieval-Augmented Generation (RAG) + implementations. + + 2. **Griptape Cloud:** + - **Deployment:** Simplifies the deployment and execution of ETL (Extract, Transform, Load), RAG, and other structures. 
+ - **API Abstractions:** Provides API abstractions to eliminate the need for infrastructure management. + - **Scalability:** Supports seamless scaling to accommodate growing workloads. + + ### Solutions & Applications: + - **Custom Projects:** Development of tailored AI solutions. + - **Turnkey SaaS:** Ready-to-use SaaS offerings for non-technical businesses. + - **Ready-made Apps:** Pre-built applications and options to offer apps to customers. + + ### Key Features: + - **Automated Data Prep (ETL):** Connects, extracts, transforms, and loads data into a vector database index. + - **Retrieval as a Service (RAG):** Generates answers, summaries, and details from data using customizable retrieval patterns. + - **Structure Runtime (RUN):** Facilitates the building and integration of AI agents, pipelines, and workflows into client applications. + + In summary, Griptape provides a robust platform for developing and managing AI applications, making it accessible for developers and businesses + to leverage AI technology effectively. +``` diff --git a/docs/griptape-tools/official-tools/web-scraper.md b/docs/griptape-tools/official-tools/web-scraper.md deleted file mode 100644 index dcd767b35..000000000 --- a/docs/griptape-tools/official-tools/web-scraper.md +++ /dev/null @@ -1,76 +0,0 @@ -# WebScraper - -This tool enables LLMs to scrape web pages for full text, summaries, authors, titles, and keywords. It can also execute search queries to answer specific questions about the page. This tool uses OpenAI APIs for some of its activities, so in order to use it provide a valid API key in `openai_api_key`. - -```python -from griptape.structures import Agent -from griptape.tools import WebScraper, TaskMemoryClient - -agent = Agent( - tools=[WebScraper(off_prompt=True), TaskMemoryClient(off_prompt=False)] -) - -agent.run( - "Based on https://www.griptape.ai/, tell me what griptape is" -) -``` -``` -[09/11/23 15:27:39] INFO Task dd9ad12c5c1e4280a6e20d7c116303ed - Input: Based on https://www.griptape.ai/, tell me - what griptape is -[09/11/23 15:27:47] INFO Subtask 4b34be74b06a47ba9cb3a4b62aa35907 - Thought: I need to find out what griptape is based - on the information provided on the website - https://www.griptape.ai/. I can use the WebScraper - tool with the get_content activity to load the - content of the website. - - Action: {"name": "WebScraper", - "path": "get_content", "input": {"values": - {"url": "https://www.griptape.ai/"}}} -[09/11/23 15:27:48] INFO Subtask 4b34be74b06a47ba9cb3a4b62aa35907 - Response: Output of "WebScraper.get_content" was - stored in memory with memory_name "TaskMemory" - and artifact_namespace - "02da5930b8d74f7ca30aecc3760a3318" -[09/11/23 15:27:59] INFO Subtask 5b255e3e98aa401295f77532bc779390 - Thought: The content of the website has been stored - in memory. I can use the TaskMemory tool with - the summarize activity to get a summary of the - content. - Action: {"name": "TaskMemoryClient", "path": - "summarize", "input": {"values": {"memory_name": - "TaskMemory", "artifact_namespace": - "02da5930b8d74f7ca30aecc3760a3318"}}} -[09/11/23 15:28:03] INFO Subtask 5b255e3e98aa401295f77532bc779390 - Response: Griptape is an open source framework - that allows developers to build and deploy AI - applications using large language models (LLMs). It - provides the ability to create conversational and - event-driven apps that can access and manipulate - data securely. 
Griptape enforces structures like - sequential pipelines and DAG-based workflows for - predictability, while also allowing for creativity - by safely prompting LLMs with external APIs and - data stores. The framework can be used to create AI - systems that operate across both dimensions. - Griptape Cloud is a managed platform for deploying - and managing AI apps, and it offers features like - scheduling and connecting to data stores and APIs. -[09/11/23 15:28:12] INFO Task dd9ad12c5c1e4280a6e20d7c116303ed - Output: Griptape is an open source framework that - enables developers to build and deploy AI - applications using large language models (LLMs). It - allows the creation of conversational and - event-driven apps that can securely access and - manipulate data. Griptape enforces structures like - sequential pipelines and DAG-based workflows for - predictability, while also allowing for creativity - by safely prompting LLMs with external APIs and - data stores. The framework can be used to create AI - systems that operate across both dimensions. - Additionally, Griptape Cloud is a managed platform - for deploying and managing AI apps, offering - features like scheduling and connecting to data - stores and APIs. -``` diff --git a/docs/griptape-tools/official-tools/web-search.md b/docs/griptape-tools/official-tools/web-search-tool.md similarity index 91% rename from docs/griptape-tools/official-tools/web-search.md rename to docs/griptape-tools/official-tools/web-search-tool.md index b30c76038..3f31fd4fd 100644 --- a/docs/griptape-tools/official-tools/web-search.md +++ b/docs/griptape-tools/official-tools/web-search-tool.md @@ -1,31 +1,9 @@ -# WebSearch +# Web Search Tool This tool enables LLMs to search the web. ```python -import os -from griptape.tools import WebSearch -from griptape.structures import Agent -from griptape.drivers import GoogleWebSearchDriver - -# Initialize the WebSearch tool with necessary parameters -web_search_tool = WebSearch( - web_search_driver=GoogleWebSearchDriver( - api_key=os.environ["GOOGLE_API_KEY"], - search_id=os.environ["GOOGLE_API_SEARCH_ID"], - results_count=5, - language="en", - country="us", - ), -) - -# Set up an agent using the WebSearch tool -agent = Agent( - tools=[web_search_tool] -) - -# Task: Search the web for a specific query -agent.run("Tell me how photosynthesis works") +--8<-- "docs/griptape-tools/official-tools/src/web_search_tool_1.py" ``` ``` [09/08/23 15:37:25] INFO Task 2cf557f7f7cd4a20a7fa2f0c46af2f71 @@ -110,3 +88,10 @@ agent.run("Tell me how photosynthesis works") in the atmosphere and forms the basis of the food chain. ``` + +Extra schema properties can be added to the Tool to allow for more customization if the Driver supports it. +In this example, we add a `sort` property to the `search` Activity which will be added as a [Google custom search query parameter](https://developers.google.com/custom-search/v1/reference/rest/v1/cse/list). 
+ +```python +--8<-- "docs/griptape-tools/official-tools/src/web_search_tool_2.py" +``` diff --git a/docs/griptape-tools/src/index_1.py b/docs/griptape-tools/src/index_1.py new file mode 100644 index 000000000..7929574d4 --- /dev/null +++ b/docs/griptape-tools/src/index_1.py @@ -0,0 +1,23 @@ +import random + +from schema import Literal, Optional, Schema + +from griptape.artifacts import TextArtifact +from griptape.tools import BaseTool +from griptape.utils.decorators import activity + + +class RandomNumberGenerator(BaseTool): + @activity( + config={ + "description": "Can be used to generate random numbers", + "schema": Schema( + {Optional(Literal("decimals", description="Number of decimals to round the random number to")): int} + ), + } + ) + def generate(self, params: dict) -> TextArtifact: + return TextArtifact(str(round(random.random(), params["values"].get("decimals")))) + + +RandomNumberGenerator() diff --git a/docs/plugins/swagger_ui_plugin.py b/docs/plugins/swagger_ui_plugin.py index 499d74cf5..2f2ca2c4e 100644 --- a/docs/plugins/swagger_ui_plugin.py +++ b/docs/plugins/swagger_ui_plugin.py @@ -20,8 +20,7 @@ def generate_page_contents(page: Any) -> str: env.filters["markdown"] = lambda text: Markup(md.convert(text)) template = env.get_template(tmpl_url) - tmpl_out = template.render(spec_url=spec_url) - return tmpl_out + return template.render(spec_url=spec_url) def on_config(config: Any) -> None: @@ -32,5 +31,5 @@ def on_page_read_source(page: Any, config: Any) -> Any: index_path = os.path.join(config["docs_dir"], config_scheme["outfile"]) page_path = os.path.join(config["docs_dir"], page.file.src_path) if index_path == page_path: - contents = generate_page_contents(page) - return contents + return generate_page_contents(page) + return None diff --git a/griptape/artifacts/__init__.py b/griptape/artifacts/__init__.py index e9fc6daba..f39bfea8d 100644 --- a/griptape/artifacts/__init__.py +++ b/griptape/artifacts/__init__.py @@ -2,6 +2,7 @@ from .error_artifact import ErrorArtifact from .info_artifact import InfoArtifact from .text_artifact import TextArtifact +from .json_artifact import JsonArtifact from .blob_artifact import BlobArtifact from .boolean_artifact import BooleanArtifact from .csv_row_artifact import CsvRowArtifact @@ -18,6 +19,7 @@ "ErrorArtifact", "InfoArtifact", "TextArtifact", + "JsonArtifact", "BlobArtifact", "BooleanArtifact", "CsvRowArtifact", diff --git a/griptape/artifacts/json_artifact.py b/griptape/artifacts/json_artifact.py new file mode 100644 index 000000000..b292879a9 --- /dev/null +++ b/griptape/artifacts/json_artifact.py @@ -0,0 +1,21 @@ +from __future__ import annotations + +import json +from typing import Union + +from attrs import define, field + +from griptape.artifacts import BaseArtifact + +Json = Union[dict[str, "Json"], list["Json"], str, int, float, bool, None] + + +@define +class JsonArtifact(BaseArtifact): + value: Json = field(converter=lambda v: json.loads(json.dumps(v)), metadata={"serializable": True}) + + def to_text(self) -> str: + return json.dumps(self.value) + + def __add__(self, other: BaseArtifact) -> JsonArtifact: + raise NotImplementedError diff --git a/griptape/common/prompt_stack/prompt_stack.py b/griptape/common/prompt_stack/prompt_stack.py index 3186dac89..c9f71aa20 100644 --- a/griptape/common/prompt_stack/prompt_stack.py +++ b/griptape/common/prompt_stack/prompt_stack.py @@ -4,7 +4,15 @@ from attrs import define, field -from griptape.artifacts import ActionArtifact, BaseArtifact, GenericArtifact, ImageArtifact, ListArtifact, 
TextArtifact +from griptape.artifacts import ( + ActionArtifact, + BaseArtifact, + ErrorArtifact, + GenericArtifact, + ImageArtifact, + ListArtifact, + TextArtifact, +) from griptape.common import ( ActionCallMessageContent, ActionResultMessageContent, @@ -62,6 +70,8 @@ def __to_message_content(self, artifact: str | BaseArtifact) -> list[BaseMessage return [ImageMessageContent(artifact)] elif isinstance(artifact, GenericArtifact): return [GenericMessageContent(artifact)] + elif isinstance(artifact, ErrorArtifact): + return [TextMessageContent(TextArtifact(artifact.to_text()))] elif isinstance(artifact, ActionArtifact): action = artifact.value output = action.output @@ -71,10 +81,6 @@ def __to_message_content(self, artifact: str | BaseArtifact) -> list[BaseMessage return [ActionResultMessageContent(output, action=action)] elif isinstance(artifact, ListArtifact): processed_contents = [self.__to_message_content(artifact) for artifact in artifact.value] - flattened_content = [ - sub_content for processed_content in processed_contents for sub_content in processed_content - ] - - return flattened_content + return [sub_content for processed_content in processed_contents for sub_content in processed_content] else: raise ValueError(f"Unsupported artifact type: {type(artifact)}") diff --git a/griptape/config/__init__.py b/griptape/config/__init__.py deleted file mode 100644 index 541eb0db0..000000000 --- a/griptape/config/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -from .base_config import BaseConfig - -from .base_structure_config import BaseStructureConfig - -from .structure_config import StructureConfig -from .openai_structure_config import OpenAiStructureConfig -from .azure_openai_structure_config import AzureOpenAiStructureConfig -from .amazon_bedrock_structure_config import AmazonBedrockStructureConfig -from .anthropic_structure_config import AnthropicStructureConfig -from .google_structure_config import GoogleStructureConfig -from .cohere_structure_config import CohereStructureConfig - - -__all__ = [ - "BaseConfig", - "BaseStructureConfig", - "StructureConfig", - "OpenAiStructureConfig", - "AzureOpenAiStructureConfig", - "AmazonBedrockStructureConfig", - "AnthropicStructureConfig", - "GoogleStructureConfig", - "CohereStructureConfig", -] diff --git a/griptape/config/amazon_bedrock_structure_config.py b/griptape/config/amazon_bedrock_structure_config.py deleted file mode 100644 index 3ad7f8f48..000000000 --- a/griptape/config/amazon_bedrock_structure_config.py +++ /dev/null @@ -1,82 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING - -from attrs import Factory, define, field - -from griptape.config import StructureConfig -from griptape.drivers import ( - AmazonBedrockImageGenerationDriver, - AmazonBedrockImageQueryDriver, - AmazonBedrockPromptDriver, - AmazonBedrockTitanEmbeddingDriver, - BaseEmbeddingDriver, - BaseImageGenerationDriver, - BasePromptDriver, - BaseVectorStoreDriver, - BedrockClaudeImageQueryModelDriver, - BedrockTitanImageGenerationModelDriver, - LocalVectorStoreDriver, -) -from griptape.utils import import_optional_dependency - -if TYPE_CHECKING: - import boto3 - - -@define -class AmazonBedrockStructureConfig(StructureConfig): - session: boto3.Session = field( - default=Factory(lambda: import_optional_dependency("boto3").Session()), - kw_only=True, - metadata={"serializable": False}, - ) - - prompt_driver: BasePromptDriver = field( - default=Factory( - lambda self: AmazonBedrockPromptDriver( - session=self.session, - 
model="anthropic.claude-3-5-sonnet-20240620-v1:0", - ), - takes_self=True, - ), - kw_only=True, - metadata={"serializable": True}, - ) - embedding_driver: BaseEmbeddingDriver = field( - default=Factory( - lambda self: AmazonBedrockTitanEmbeddingDriver(session=self.session, model="amazon.titan-embed-text-v1"), - takes_self=True, - ), - kw_only=True, - metadata={"serializable": True}, - ) - image_generation_driver: BaseImageGenerationDriver = field( - default=Factory( - lambda self: AmazonBedrockImageGenerationDriver( - session=self.session, - model="amazon.titan-image-generator-v1", - image_generation_model_driver=BedrockTitanImageGenerationModelDriver(), - ), - takes_self=True, - ), - kw_only=True, - metadata={"serializable": True}, - ) - image_query_driver: BaseImageGenerationDriver = field( - default=Factory( - lambda self: AmazonBedrockImageQueryDriver( - session=self.session, - model="anthropic.claude-3-5-sonnet-20240620-v1:0", - image_query_model_driver=BedrockClaudeImageQueryModelDriver(), - ), - takes_self=True, - ), - kw_only=True, - metadata={"serializable": True}, - ) - vector_store_driver: BaseVectorStoreDriver = field( - default=Factory(lambda self: LocalVectorStoreDriver(embedding_driver=self.embedding_driver), takes_self=True), - kw_only=True, - metadata={"serializable": True}, - ) diff --git a/griptape/config/anthropic_structure_config.py b/griptape/config/anthropic_structure_config.py deleted file mode 100644 index 1bb5bf49b..000000000 --- a/griptape/config/anthropic_structure_config.py +++ /dev/null @@ -1,39 +0,0 @@ -from attrs import Factory, define, field - -from griptape.config import StructureConfig -from griptape.drivers import ( - AnthropicImageQueryDriver, - AnthropicPromptDriver, - BaseEmbeddingDriver, - BaseImageQueryDriver, - BasePromptDriver, - BaseVectorStoreDriver, - LocalVectorStoreDriver, - VoyageAiEmbeddingDriver, -) - - -@define -class AnthropicStructureConfig(StructureConfig): - prompt_driver: BasePromptDriver = field( - default=Factory(lambda: AnthropicPromptDriver(model="claude-3-5-sonnet-20240620")), - metadata={"serializable": True}, - kw_only=True, - ) - embedding_driver: BaseEmbeddingDriver = field( - default=Factory(lambda: VoyageAiEmbeddingDriver(model="voyage-large-2")), - metadata={"serializable": True}, - kw_only=True, - ) - vector_store_driver: BaseVectorStoreDriver = field( - default=Factory( - lambda: LocalVectorStoreDriver(embedding_driver=VoyageAiEmbeddingDriver(model="voyage-large-2")), - ), - kw_only=True, - metadata={"serializable": True}, - ) - image_query_driver: BaseImageQueryDriver = field( - default=Factory(lambda: AnthropicImageQueryDriver(model="claude-3-5-sonnet-20240620")), - kw_only=True, - metadata={"serializable": True}, - ) diff --git a/griptape/config/azure_openai_structure_config.py b/griptape/config/azure_openai_structure_config.py deleted file mode 100644 index ce0303e34..000000000 --- a/griptape/config/azure_openai_structure_config.py +++ /dev/null @@ -1,107 +0,0 @@ -from __future__ import annotations - -from typing import Callable, Optional - -from attrs import Factory, define, field - -from griptape.config import StructureConfig -from griptape.drivers import ( - AzureOpenAiChatPromptDriver, - AzureOpenAiEmbeddingDriver, - AzureOpenAiImageGenerationDriver, - AzureOpenAiImageQueryDriver, - BaseEmbeddingDriver, - BaseImageGenerationDriver, - BaseImageQueryDriver, - BasePromptDriver, - BaseVectorStoreDriver, - LocalVectorStoreDriver, -) - - -@define -class AzureOpenAiStructureConfig(StructureConfig): - """Azure OpenAI 
Structure Configuration. - - Attributes: - azure_endpoint: The endpoint for the Azure OpenAI instance. - azure_ad_token: An optional Azure Active Directory token. - azure_ad_token_provider: An optional Azure Active Directory token provider. - api_key: An optional Azure API key. - prompt_driver: An Azure OpenAI Chat Prompt Driver. - image_generation_driver: An Azure OpenAI Image Generation Driver. - image_query_driver: An Azure OpenAI Vision Image Query Driver. - embedding_driver: An Azure OpenAI Embedding Driver. - vector_store_driver: A Local Vector Store Driver. - """ - - azure_endpoint: str = field(kw_only=True, metadata={"serializable": True}) - azure_ad_token: Optional[str] = field(kw_only=True, default=None, metadata={"serializable": False}) - azure_ad_token_provider: Optional[Callable[[], str]] = field( - kw_only=True, - default=None, - metadata={"serializable": False}, - ) - api_key: Optional[str] = field(kw_only=True, default=None, metadata={"serializable": False}) - prompt_driver: BasePromptDriver = field( - default=Factory( - lambda self: AzureOpenAiChatPromptDriver( - model="gpt-4o", - azure_endpoint=self.azure_endpoint, - api_key=self.api_key, - azure_ad_token=self.azure_ad_token, - azure_ad_token_provider=self.azure_ad_token_provider, - ), - takes_self=True, - ), - metadata={"serializable": True}, - kw_only=True, - ) - image_generation_driver: BaseImageGenerationDriver = field( - default=Factory( - lambda self: AzureOpenAiImageGenerationDriver( - model="dall-e-2", - azure_endpoint=self.azure_endpoint, - api_key=self.api_key, - azure_ad_token=self.azure_ad_token, - azure_ad_token_provider=self.azure_ad_token_provider, - image_size="512x512", - ), - takes_self=True, - ), - metadata={"serializable": True}, - kw_only=True, - ) - image_query_driver: BaseImageQueryDriver = field( - default=Factory( - lambda self: AzureOpenAiImageQueryDriver( - model="gpt-4o", - azure_endpoint=self.azure_endpoint, - api_key=self.api_key, - azure_ad_token=self.azure_ad_token, - azure_ad_token_provider=self.azure_ad_token_provider, - ), - takes_self=True, - ), - metadata={"serializable": True}, - kw_only=True, - ) - embedding_driver: BaseEmbeddingDriver = field( - default=Factory( - lambda self: AzureOpenAiEmbeddingDriver( - model="text-embedding-3-small", - azure_endpoint=self.azure_endpoint, - api_key=self.api_key, - azure_ad_token=self.azure_ad_token, - azure_ad_token_provider=self.azure_ad_token_provider, - ), - takes_self=True, - ), - metadata={"serializable": True}, - kw_only=True, - ) - vector_store_driver: BaseVectorStoreDriver = field( - default=Factory(lambda self: LocalVectorStoreDriver(embedding_driver=self.embedding_driver), takes_self=True), - metadata={"serializable": True}, - kw_only=True, - ) diff --git a/griptape/config/base_structure_config.py b/griptape/config/base_structure_config.py deleted file mode 100644 index 31949cd2f..000000000 --- a/griptape/config/base_structure_config.py +++ /dev/null @@ -1,83 +0,0 @@ -from __future__ import annotations - -from abc import ABC -from typing import TYPE_CHECKING, Optional - -from attrs import define, field - -from griptape.config import BaseConfig -from griptape.events import EventListener -from griptape.mixins.event_publisher_mixin import EventPublisherMixin -from griptape.utils import dict_merge - -if TYPE_CHECKING: - from griptape.drivers import ( - BaseAudioTranscriptionDriver, - BaseConversationMemoryDriver, - BaseEmbeddingDriver, - BaseImageGenerationDriver, - BaseImageQueryDriver, - BasePromptDriver, - BaseTextToSpeechDriver, - 
BaseVectorStoreDriver, - ) - from griptape.structures import Structure - - -@define -class BaseStructureConfig(BaseConfig, ABC): - prompt_driver: BasePromptDriver = field(kw_only=True, metadata={"serializable": True}) - image_generation_driver: BaseImageGenerationDriver = field(kw_only=True, metadata={"serializable": True}) - image_query_driver: BaseImageQueryDriver = field(kw_only=True, metadata={"serializable": True}) - embedding_driver: BaseEmbeddingDriver = field(kw_only=True, metadata={"serializable": True}) - vector_store_driver: BaseVectorStoreDriver = field(kw_only=True, metadata={"serializable": True}) - conversation_memory_driver: Optional[BaseConversationMemoryDriver] = field( - default=None, - kw_only=True, - metadata={"serializable": True}, - ) - text_to_speech_driver: BaseTextToSpeechDriver = field(kw_only=True, metadata={"serializable": True}) - audio_transcription_driver: BaseAudioTranscriptionDriver = field(kw_only=True, metadata={"serializable": True}) - - _structure: Structure = field(default=None, kw_only=True, alias="structure") - _event_listener: Optional[EventListener] = field(default=None, kw_only=True, alias="event_listener") - - @property - def drivers(self) -> list: - return [ - self.prompt_driver, - self.image_generation_driver, - self.image_query_driver, - self.embedding_driver, - self.vector_store_driver, - self.conversation_memory_driver, - self.text_to_speech_driver, - self.audio_transcription_driver, - ] - - @property - def structure(self) -> Optional[Structure]: - return self._structure - - @structure.setter - def structure(self, structure: Structure) -> None: - if structure != self.structure: - event_publisher_drivers = [ - driver for driver in self.drivers if driver is not None and isinstance(driver, EventPublisherMixin) - ] - - for driver in event_publisher_drivers: - if self._event_listener is not None: - driver.remove_event_listener(self._event_listener) - - self._event_listener = EventListener(structure.publish_event) - for driver in event_publisher_drivers: - driver.add_event_listener(self._event_listener) - - self._structure = structure - - def merge_config(self, config: dict) -> BaseStructureConfig: - base_config = self.to_dict() - merged_config = dict_merge(base_config, config) - - return BaseStructureConfig.from_dict(merged_config) diff --git a/griptape/config/cohere_structure_config.py b/griptape/config/cohere_structure_config.py deleted file mode 100644 index 2e896b9b0..000000000 --- a/griptape/config/cohere_structure_config.py +++ /dev/null @@ -1,39 +0,0 @@ -from attrs import Factory, define, field - -from griptape.config import StructureConfig -from griptape.drivers import ( - BaseEmbeddingDriver, - BasePromptDriver, - BaseVectorStoreDriver, - CohereEmbeddingDriver, - CoherePromptDriver, - LocalVectorStoreDriver, -) - - -@define -class CohereStructureConfig(StructureConfig): - api_key: str = field(metadata={"serializable": False}, kw_only=True) - - prompt_driver: BasePromptDriver = field( - default=Factory(lambda self: CoherePromptDriver(model="command-r", api_key=self.api_key), takes_self=True), - metadata={"serializable": True}, - kw_only=True, - ) - embedding_driver: BaseEmbeddingDriver = field( - default=Factory( - lambda self: CohereEmbeddingDriver( - model="embed-english-v3.0", - api_key=self.api_key, - input_type="search_document", - ), - takes_self=True, - ), - metadata={"serializable": True}, - kw_only=True, - ) - vector_store_driver: BaseVectorStoreDriver = field( - default=Factory(lambda self: 
LocalVectorStoreDriver(embedding_driver=self.embedding_driver), takes_self=True), - kw_only=True, - metadata={"serializable": True}, - ) diff --git a/griptape/config/google_structure_config.py b/griptape/config/google_structure_config.py deleted file mode 100644 index 66ed90b4b..000000000 --- a/griptape/config/google_structure_config.py +++ /dev/null @@ -1,32 +0,0 @@ -from attrs import Factory, define, field - -from griptape.config import StructureConfig -from griptape.drivers import ( - BaseEmbeddingDriver, - BasePromptDriver, - BaseVectorStoreDriver, - GoogleEmbeddingDriver, - GooglePromptDriver, - LocalVectorStoreDriver, -) - - -@define -class GoogleStructureConfig(StructureConfig): - prompt_driver: BasePromptDriver = field( - default=Factory(lambda: GooglePromptDriver(model="gemini-1.5-pro")), - kw_only=True, - metadata={"serializable": True}, - ) - embedding_driver: BaseEmbeddingDriver = field( - default=Factory(lambda: GoogleEmbeddingDriver(model="models/embedding-001")), - kw_only=True, - metadata={"serializable": True}, - ) - vector_store_driver: BaseVectorStoreDriver = field( - default=Factory( - lambda: LocalVectorStoreDriver(embedding_driver=GoogleEmbeddingDriver(model="models/embedding-001")), - ), - kw_only=True, - metadata={"serializable": True}, - ) diff --git a/griptape/config/openai_structure_config.py b/griptape/config/openai_structure_config.py deleted file mode 100644 index 63806dfc9..000000000 --- a/griptape/config/openai_structure_config.py +++ /dev/null @@ -1,60 +0,0 @@ -from attrs import Factory, define, field - -from griptape.config import StructureConfig -from griptape.drivers import ( - BaseAudioTranscriptionDriver, - BaseEmbeddingDriver, - BaseImageGenerationDriver, - BaseImageQueryDriver, - BasePromptDriver, - BaseTextToSpeechDriver, - BaseVectorStoreDriver, - LocalVectorStoreDriver, - OpenAiAudioTranscriptionDriver, - OpenAiChatPromptDriver, - OpenAiEmbeddingDriver, - OpenAiImageGenerationDriver, - OpenAiImageQueryDriver, - OpenAiTextToSpeechDriver, -) - - -@define -class OpenAiStructureConfig(StructureConfig): - prompt_driver: BasePromptDriver = field( - default=Factory(lambda: OpenAiChatPromptDriver(model="gpt-4o")), - metadata={"serializable": True}, - kw_only=True, - ) - image_generation_driver: BaseImageGenerationDriver = field( - default=Factory(lambda: OpenAiImageGenerationDriver(model="dall-e-2", image_size="512x512")), - kw_only=True, - metadata={"serializable": True}, - ) - image_query_driver: BaseImageQueryDriver = field( - default=Factory(lambda: OpenAiImageQueryDriver(model="gpt-4o")), - kw_only=True, - metadata={"serializable": True}, - ) - embedding_driver: BaseEmbeddingDriver = field( - default=Factory(lambda: OpenAiEmbeddingDriver(model="text-embedding-3-small")), - metadata={"serializable": True}, - kw_only=True, - ) - vector_store_driver: BaseVectorStoreDriver = field( - default=Factory( - lambda: LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver(model="text-embedding-3-small")), - ), - kw_only=True, - metadata={"serializable": True}, - ) - text_to_speech_driver: BaseTextToSpeechDriver = field( - default=Factory(lambda: OpenAiTextToSpeechDriver(model="tts")), - kw_only=True, - metadata={"serializable": True}, - ) - audio_transcription_driver: BaseAudioTranscriptionDriver = field( - default=Factory(lambda: OpenAiAudioTranscriptionDriver(model="whisper-1")), - kw_only=True, - metadata={"serializable": True}, - ) diff --git a/griptape/config/structure_config.py b/griptape/config/structure_config.py deleted file mode 100644 index 
ef95012ce..000000000 --- a/griptape/config/structure_config.py +++ /dev/null @@ -1,68 +0,0 @@ -from __future__ import annotations - -from typing import Optional - -from attrs import Factory, define, field - -from griptape.config import BaseStructureConfig -from griptape.drivers import ( - BaseAudioTranscriptionDriver, - BaseConversationMemoryDriver, - BaseEmbeddingDriver, - BaseImageGenerationDriver, - BaseImageQueryDriver, - BasePromptDriver, - BaseTextToSpeechDriver, - BaseVectorStoreDriver, - DummyAudioTranscriptionDriver, - DummyEmbeddingDriver, - DummyImageGenerationDriver, - DummyImageQueryDriver, - DummyPromptDriver, - DummyTextToSpeechDriver, - DummyVectorStoreDriver, -) - - -@define -class StructureConfig(BaseStructureConfig): - prompt_driver: BasePromptDriver = field( - kw_only=True, - default=Factory(lambda: DummyPromptDriver()), - metadata={"serializable": True}, - ) - image_generation_driver: BaseImageGenerationDriver = field( - kw_only=True, - default=Factory(lambda: DummyImageGenerationDriver()), - metadata={"serializable": True}, - ) - image_query_driver: BaseImageQueryDriver = field( - kw_only=True, - default=Factory(lambda: DummyImageQueryDriver()), - metadata={"serializable": True}, - ) - embedding_driver: BaseEmbeddingDriver = field( - kw_only=True, - default=Factory(lambda: DummyEmbeddingDriver()), - metadata={"serializable": True}, - ) - vector_store_driver: BaseVectorStoreDriver = field( - default=Factory(lambda: DummyVectorStoreDriver()), - kw_only=True, - metadata={"serializable": True}, - ) - conversation_memory_driver: Optional[BaseConversationMemoryDriver] = field( - default=None, - kw_only=True, - metadata={"serializable": True}, - ) - text_to_speech_driver: BaseTextToSpeechDriver = field( - default=Factory(lambda: DummyTextToSpeechDriver()), - kw_only=True, - metadata={"serializable": True}, - ) - audio_transcription_driver: BaseAudioTranscriptionDriver = field( - default=Factory(lambda: DummyAudioTranscriptionDriver()), - kw_only=True, - metadata={"serializable": True}, - ) diff --git a/griptape/configs/__init__.py b/griptape/configs/__init__.py new file mode 100644 index 000000000..bd12c7836 --- /dev/null +++ b/griptape/configs/__init__.py @@ -0,0 +1,8 @@ +from .base_config import BaseConfig +from .defaults_config import Defaults + + +__all__ = [ + "BaseConfig", + "Defaults", +] diff --git a/griptape/config/base_config.py b/griptape/configs/base_config.py similarity index 73% rename from griptape/config/base_config.py rename to griptape/configs/base_config.py index 241efadcd..09d230016 100644 --- a/griptape/config/base_config.py +++ b/griptape/configs/base_config.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from abc import ABC from attrs import define @@ -5,5 +7,5 @@ from griptape.mixins.serializable_mixin import SerializableMixin -@define +@define(kw_only=True) class BaseConfig(SerializableMixin, ABC): ... 
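For orientation before the new files below: the griptape.configs package introduced here replaces the deleted *StructureConfig classes with a process-wide Defaults singleton whose drivers_config falls back to OpenAiDriversConfig. A minimal usage sketch, assuming only the import paths declared in griptape/configs/__init__.py above and griptape/configs/drivers/__init__.py below; the printed model name is taken from the AnthropicDriversConfig defaults further down and is illustrative only.

from griptape.configs import Defaults
from griptape.configs.drivers import AnthropicDriversConfig

# Defaults is a module-level singleton; reassigning drivers_config changes what
# later reads of Defaults.drivers_config resolve to.
Defaults.drivers_config = AnthropicDriversConfig()

# Driver properties are lazy, so the Anthropic prompt driver is only constructed on first access.
print(Defaults.drivers_config.prompt_driver.model)  # "claude-3-5-sonnet-20240620" per the config defaults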
diff --git a/griptape/configs/defaults_config.py b/griptape/configs/defaults_config.py new file mode 100644 index 000000000..b81f50cdc --- /dev/null +++ b/griptape/configs/defaults_config.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from attrs import Factory, define, field + +from griptape.mixins.singleton_mixin import SingletonMixin + +from .base_config import BaseConfig +from .drivers.openai_drivers_config import OpenAiDriversConfig +from .logging.logging_config import LoggingConfig + +if TYPE_CHECKING: + from .drivers.base_drivers_config import BaseDriversConfig + + +@define(kw_only=True) +class _DefaultsConfig(BaseConfig, SingletonMixin): + logging_config: LoggingConfig = field(default=Factory(lambda: LoggingConfig())) + drivers_config: BaseDriversConfig = field(default=Factory(lambda: OpenAiDriversConfig())) + + +Defaults = _DefaultsConfig() diff --git a/griptape/configs/drivers/__init__.py b/griptape/configs/drivers/__init__.py new file mode 100644 index 000000000..d407814e8 --- /dev/null +++ b/griptape/configs/drivers/__init__.py @@ -0,0 +1,20 @@ +from .base_drivers_config import BaseDriversConfig +from .drivers_config import DriversConfig + +from .openai_drivers_config import OpenAiDriversConfig +from .azure_openai_drivers_config import AzureOpenAiDriversConfig +from .amazon_bedrock_drivers_config import AmazonBedrockDriversConfig +from .anthropic_drivers_config import AnthropicDriversConfig +from .google_drivers_config import GoogleDriversConfig +from .cohere_drivers_config import CohereDriversConfig + +__all__ = [ + "BaseDriversConfig", + "DriversConfig", + "OpenAiDriversConfig", + "AzureOpenAiDriversConfig", + "AmazonBedrockDriversConfig", + "AnthropicDriversConfig", + "GoogleDriversConfig", + "CohereDriversConfig", +] diff --git a/griptape/configs/drivers/amazon_bedrock_drivers_config.py b/griptape/configs/drivers/amazon_bedrock_drivers_config.py new file mode 100644 index 000000000..7a54ac522 --- /dev/null +++ b/griptape/configs/drivers/amazon_bedrock_drivers_config.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from attrs import Factory, define, field + +from griptape.configs.drivers import DriversConfig +from griptape.drivers import ( + AmazonBedrockImageGenerationDriver, + AmazonBedrockImageQueryDriver, + AmazonBedrockPromptDriver, + AmazonBedrockTitanEmbeddingDriver, + BedrockClaudeImageQueryModelDriver, + BedrockTitanImageGenerationModelDriver, + LocalVectorStoreDriver, +) +from griptape.utils import import_optional_dependency +from griptape.utils.decorators import lazy_property + +if TYPE_CHECKING: + import boto3 + + +@define +class AmazonBedrockDriversConfig(DriversConfig): + session: boto3.Session = field( + default=Factory(lambda: import_optional_dependency("boto3").Session()), + kw_only=True, + metadata={"serializable": False}, + ) + + @lazy_property() + def prompt_driver(self) -> AmazonBedrockPromptDriver: + return AmazonBedrockPromptDriver(session=self.session, model="anthropic.claude-3-5-sonnet-20240620-v1:0") + + @lazy_property() + def embedding_driver(self) -> AmazonBedrockTitanEmbeddingDriver: + return AmazonBedrockTitanEmbeddingDriver(session=self.session, model="amazon.titan-embed-text-v1") + + @lazy_property() + def image_generation_driver(self) -> AmazonBedrockImageGenerationDriver: + return AmazonBedrockImageGenerationDriver( + session=self.session, + model="amazon.titan-image-generator-v1", + 
image_generation_model_driver=BedrockTitanImageGenerationModelDriver(), + ) + + @lazy_property() + def image_query_driver(self) -> AmazonBedrockImageQueryDriver: + return AmazonBedrockImageQueryDriver( + session=self.session, + model="anthropic.claude-3-5-sonnet-20240620-v1:0", + image_query_model_driver=BedrockClaudeImageQueryModelDriver(), + ) + + @lazy_property() + def vector_store_driver(self) -> LocalVectorStoreDriver: + return LocalVectorStoreDriver( + embedding_driver=AmazonBedrockTitanEmbeddingDriver(session=self.session, model="amazon.titan-embed-text-v1") + ) diff --git a/griptape/configs/drivers/anthropic_drivers_config.py b/griptape/configs/drivers/anthropic_drivers_config.py new file mode 100644 index 000000000..e5a1f2719 --- /dev/null +++ b/griptape/configs/drivers/anthropic_drivers_config.py @@ -0,0 +1,29 @@ +from attrs import define + +from griptape.configs.drivers import DriversConfig +from griptape.drivers import ( + AnthropicImageQueryDriver, + AnthropicPromptDriver, + LocalVectorStoreDriver, + VoyageAiEmbeddingDriver, +) +from griptape.utils.decorators import lazy_property + + +@define +class AnthropicDriversConfig(DriversConfig): + @lazy_property() + def prompt_driver(self) -> AnthropicPromptDriver: + return AnthropicPromptDriver(model="claude-3-5-sonnet-20240620") + + @lazy_property() + def embedding_driver(self) -> VoyageAiEmbeddingDriver: + return VoyageAiEmbeddingDriver(model="voyage-large-2") + + @lazy_property() + def vector_store_driver(self) -> LocalVectorStoreDriver: + return LocalVectorStoreDriver(embedding_driver=VoyageAiEmbeddingDriver(model="voyage-large-2")) + + @lazy_property() + def image_query_driver(self) -> AnthropicImageQueryDriver: + return AnthropicImageQueryDriver(model="claude-3-5-sonnet-20240620") diff --git a/griptape/configs/drivers/azure_openai_drivers_config.py b/griptape/configs/drivers/azure_openai_drivers_config.py new file mode 100644 index 000000000..a29ba3c2f --- /dev/null +++ b/griptape/configs/drivers/azure_openai_drivers_config.py @@ -0,0 +1,94 @@ +from __future__ import annotations + +from typing import Callable, Optional + +from attrs import define, field + +from griptape.configs.drivers import DriversConfig +from griptape.drivers import ( + AzureOpenAiChatPromptDriver, + AzureOpenAiEmbeddingDriver, + AzureOpenAiImageGenerationDriver, + AzureOpenAiImageQueryDriver, + LocalVectorStoreDriver, +) +from griptape.utils.decorators import lazy_property + + +@define +class AzureOpenAiDriversConfig(DriversConfig): + """Azure OpenAI Drivers Configuration. + + Attributes: + azure_endpoint: The endpoint for the Azure OpenAI instance. + azure_ad_token: An optional Azure Active Directory token. + azure_ad_token_provider: An optional Azure Active Directory token provider. + api_key: An optional Azure API key. + prompt_driver: An Azure OpenAI Chat Prompt Driver. + image_generation_driver: An Azure OpenAI Image Generation Driver. + image_query_driver: An Azure OpenAI Vision Image Query Driver. + embedding_driver: An Azure OpenAI Embedding Driver. + vector_store_driver: A Local Vector Store Driver. 
+ """ + + azure_endpoint: str = field(kw_only=True, metadata={"serializable": True}) + azure_ad_token: Optional[str] = field(kw_only=True, default=None, metadata={"serializable": False}) + azure_ad_token_provider: Optional[Callable[[], str]] = field( + kw_only=True, + default=None, + metadata={"serializable": False}, + ) + api_key: Optional[str] = field(kw_only=True, default=None, metadata={"serializable": False}) + + @lazy_property() + def prompt_driver(self) -> AzureOpenAiChatPromptDriver: + return AzureOpenAiChatPromptDriver( + model="gpt-4o", + azure_endpoint=self.azure_endpoint, + api_key=self.api_key, + azure_ad_token=self.azure_ad_token, + azure_ad_token_provider=self.azure_ad_token_provider, + ) + + @lazy_property() + def embedding_driver(self) -> AzureOpenAiEmbeddingDriver: + return AzureOpenAiEmbeddingDriver( + model="text-embedding-3-small", + azure_endpoint=self.azure_endpoint, + api_key=self.api_key, + azure_ad_token=self.azure_ad_token, + azure_ad_token_provider=self.azure_ad_token_provider, + ) + + @lazy_property() + def image_generation_driver(self) -> AzureOpenAiImageGenerationDriver: + return AzureOpenAiImageGenerationDriver( + model="dall-e-2", + azure_endpoint=self.azure_endpoint, + api_key=self.api_key, + azure_ad_token=self.azure_ad_token, + azure_ad_token_provider=self.azure_ad_token_provider, + image_size="512x512", + ) + + @lazy_property() + def image_query_driver(self) -> AzureOpenAiImageQueryDriver: + return AzureOpenAiImageQueryDriver( + model="gpt-4o", + azure_endpoint=self.azure_endpoint, + api_key=self.api_key, + azure_ad_token=self.azure_ad_token, + azure_ad_token_provider=self.azure_ad_token_provider, + ) + + @lazy_property() + def vector_store_driver(self) -> LocalVectorStoreDriver: + return LocalVectorStoreDriver( + embedding_driver=AzureOpenAiEmbeddingDriver( + model="text-embedding-3-small", + azure_endpoint=self.azure_endpoint, + api_key=self.api_key, + azure_ad_token=self.azure_ad_token, + azure_ad_token_provider=self.azure_ad_token_provider, + ) + ) diff --git a/griptape/configs/drivers/base_drivers_config.py b/griptape/configs/drivers/base_drivers_config.py new file mode 100644 index 000000000..ec7503478 --- /dev/null +++ b/griptape/configs/drivers/base_drivers_config.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Optional + +from attrs import define, field + +from griptape.mixins import SerializableMixin +from griptape.utils.decorators import lazy_property + +if TYPE_CHECKING: + from griptape.drivers import ( + BaseAudioTranscriptionDriver, + BaseConversationMemoryDriver, + BaseEmbeddingDriver, + BaseImageGenerationDriver, + BaseImageQueryDriver, + BasePromptDriver, + BaseTextToSpeechDriver, + BaseVectorStoreDriver, + ) + + +@define +class BaseDriversConfig(ABC, SerializableMixin): + _prompt_driver: BasePromptDriver = field( + kw_only=True, default=None, metadata={"serializable": True}, alias="prompt_driver" + ) + _image_generation_driver: BaseImageGenerationDriver = field( + kw_only=True, default=None, metadata={"serializable": True}, alias="image_generation_driver" + ) + _image_query_driver: BaseImageQueryDriver = field( + kw_only=True, default=None, metadata={"serializable": True}, alias="image_query_driver" + ) + _embedding_driver: BaseEmbeddingDriver = field( + kw_only=True, default=None, metadata={"serializable": True}, alias="embedding_driver" + ) + _vector_store_driver: BaseVectorStoreDriver = field( + default=None, kw_only=True, metadata={"serializable": 
True}, alias="vector_store_driver" + ) + _conversation_memory_driver: Optional[BaseConversationMemoryDriver] = field( + default=None, kw_only=True, metadata={"serializable": True}, alias="conversation_memory_driver" + ) + _text_to_speech_driver: BaseTextToSpeechDriver = field( + default=None, kw_only=True, metadata={"serializable": True}, alias="text_to_speech_driver" + ) + _audio_transcription_driver: BaseAudioTranscriptionDriver = field( + default=None, kw_only=True, metadata={"serializable": True}, alias="audio_transcription_driver" + ) + + @lazy_property() + @abstractmethod + def prompt_driver(self) -> BasePromptDriver: ... + + @lazy_property() + @abstractmethod + def image_generation_driver(self) -> BaseImageGenerationDriver: ... + + @lazy_property() + @abstractmethod + def image_query_driver(self) -> BaseImageQueryDriver: ... + + @lazy_property() + @abstractmethod + def embedding_driver(self) -> BaseEmbeddingDriver: ... + + @lazy_property() + @abstractmethod + def vector_store_driver(self) -> BaseVectorStoreDriver: ... + + @lazy_property() + @abstractmethod + def conversation_memory_driver(self) -> Optional[BaseConversationMemoryDriver]: ... + + @lazy_property() + @abstractmethod + def text_to_speech_driver(self) -> BaseTextToSpeechDriver: ... + + @lazy_property() + @abstractmethod + def audio_transcription_driver(self) -> BaseAudioTranscriptionDriver: ... diff --git a/griptape/configs/drivers/cohere_drivers_config.py b/griptape/configs/drivers/cohere_drivers_config.py new file mode 100644 index 000000000..b5d8da8b0 --- /dev/null +++ b/griptape/configs/drivers/cohere_drivers_config.py @@ -0,0 +1,36 @@ +from attrs import define, field + +from griptape.configs.drivers import DriversConfig +from griptape.drivers import ( + CohereEmbeddingDriver, + CoherePromptDriver, + LocalVectorStoreDriver, +) +from griptape.utils.decorators import lazy_property + + +@define +class CohereDriversConfig(DriversConfig): + api_key: str = field(metadata={"serializable": False}, kw_only=True) + + @lazy_property() + def prompt_driver(self) -> CoherePromptDriver: + return CoherePromptDriver(model="command-r", api_key=self.api_key) + + @lazy_property() + def embedding_driver(self) -> CohereEmbeddingDriver: + return CohereEmbeddingDriver( + model="embed-english-v3.0", + api_key=self.api_key, + input_type="search_document", + ) + + @lazy_property() + def vector_store_driver(self) -> LocalVectorStoreDriver: + return LocalVectorStoreDriver( + embedding_driver=CohereEmbeddingDriver( + model="embed-english-v3.0", + api_key=self.api_key, + input_type="search_document", + ) + ) diff --git a/griptape/configs/drivers/drivers_config.py b/griptape/configs/drivers/drivers_config.py new file mode 100644 index 000000000..ed68bcf8c --- /dev/null +++ b/griptape/configs/drivers/drivers_config.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Optional + +from attrs import define + +from griptape.configs.drivers import BaseDriversConfig +from griptape.drivers import ( + DummyAudioTranscriptionDriver, + DummyEmbeddingDriver, + DummyImageGenerationDriver, + DummyImageQueryDriver, + DummyPromptDriver, + DummyTextToSpeechDriver, + DummyVectorStoreDriver, +) +from griptape.utils.decorators import lazy_property + +if TYPE_CHECKING: + from griptape.drivers import ( + BaseAudioTranscriptionDriver, + BaseConversationMemoryDriver, + BaseEmbeddingDriver, + BaseImageGenerationDriver, + BaseImageQueryDriver, + BasePromptDriver, + BaseTextToSpeechDriver, + BaseVectorStoreDriver, + ) + + +@define 
+class DriversConfig(BaseDriversConfig): + @lazy_property() + def prompt_driver(self) -> BasePromptDriver: + return DummyPromptDriver() + + @lazy_property() + def image_generation_driver(self) -> BaseImageGenerationDriver: + return DummyImageGenerationDriver() + + @lazy_property() + def image_query_driver(self) -> BaseImageQueryDriver: + return DummyImageQueryDriver() + + @lazy_property() + def embedding_driver(self) -> BaseEmbeddingDriver: + return DummyEmbeddingDriver() + + @lazy_property() + def vector_store_driver(self) -> BaseVectorStoreDriver: + return DummyVectorStoreDriver(embedding_driver=self.embedding_driver) + + @lazy_property() + def conversation_memory_driver(self) -> Optional[BaseConversationMemoryDriver]: + return None + + @lazy_property() + def text_to_speech_driver(self) -> BaseTextToSpeechDriver: + return DummyTextToSpeechDriver() + + @lazy_property() + def audio_transcription_driver(self) -> BaseAudioTranscriptionDriver: + return DummyAudioTranscriptionDriver() diff --git a/griptape/configs/drivers/google_drivers_config.py b/griptape/configs/drivers/google_drivers_config.py new file mode 100644 index 000000000..8d5325235 --- /dev/null +++ b/griptape/configs/drivers/google_drivers_config.py @@ -0,0 +1,24 @@ +from attrs import define + +from griptape.configs.drivers import DriversConfig +from griptape.drivers import ( + GoogleEmbeddingDriver, + GooglePromptDriver, + LocalVectorStoreDriver, +) +from griptape.utils.decorators import lazy_property + + +@define +class GoogleDriversConfig(DriversConfig): + @lazy_property() + def prompt_driver(self) -> GooglePromptDriver: + return GooglePromptDriver(model="gemini-1.5-pro") + + @lazy_property() + def embedding_driver(self) -> GoogleEmbeddingDriver: + return GoogleEmbeddingDriver(model="models/embedding-001") + + @lazy_property() + def vector_store_driver(self) -> LocalVectorStoreDriver: + return LocalVectorStoreDriver(embedding_driver=GoogleEmbeddingDriver(model="models/embedding-001")) diff --git a/griptape/configs/drivers/openai_drivers_config.py b/griptape/configs/drivers/openai_drivers_config.py new file mode 100644 index 000000000..205cfb0e1 --- /dev/null +++ b/griptape/configs/drivers/openai_drivers_config.py @@ -0,0 +1,44 @@ +from attrs import define + +from griptape.configs.drivers import DriversConfig +from griptape.drivers import ( + LocalVectorStoreDriver, + OpenAiAudioTranscriptionDriver, + OpenAiChatPromptDriver, + OpenAiEmbeddingDriver, + OpenAiImageGenerationDriver, + OpenAiImageQueryDriver, + OpenAiTextToSpeechDriver, +) +from griptape.utils.decorators import lazy_property + + +@define +class OpenAiDriversConfig(DriversConfig): + @lazy_property() + def prompt_driver(self) -> OpenAiChatPromptDriver: + return OpenAiChatPromptDriver(model="gpt-4o") + + @lazy_property() + def image_generation_driver(self) -> OpenAiImageGenerationDriver: + return OpenAiImageGenerationDriver(model="dall-e-2", image_size="512x512") + + @lazy_property() + def image_query_driver(self) -> OpenAiImageQueryDriver: + return OpenAiImageQueryDriver(model="gpt-4o") + + @lazy_property() + def embedding_driver(self) -> OpenAiEmbeddingDriver: + return OpenAiEmbeddingDriver(model="text-embedding-3-small") + + @lazy_property() + def vector_store_driver(self) -> LocalVectorStoreDriver: + return LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver(model="text-embedding-3-small")) + + @lazy_property() + def text_to_speech_driver(self) -> OpenAiTextToSpeechDriver: + return OpenAiTextToSpeechDriver(model="tts") + + @lazy_property() + def 
audio_transcription_driver(self) -> OpenAiAudioTranscriptionDriver: + return OpenAiAudioTranscriptionDriver(model="whisper-1") diff --git a/griptape/configs/logging/__init__.py b/griptape/configs/logging/__init__.py new file mode 100644 index 000000000..de7726060 --- /dev/null +++ b/griptape/configs/logging/__init__.py @@ -0,0 +1,5 @@ +from .logging_config import LoggingConfig +from .truncate_logging_filter import TruncateLoggingFilter +from .newline_logging_filter import NewlineLoggingFilter + +__all__ = ["LoggingConfig", "TruncateLoggingFilter", "NewlineLoggingFilter"] diff --git a/griptape/configs/logging/logging_config.py b/griptape/configs/logging/logging_config.py new file mode 100644 index 000000000..80497d7c8 --- /dev/null +++ b/griptape/configs/logging/logging_config.py @@ -0,0 +1,17 @@ +from __future__ import annotations + +import logging + +from attrs import define, field +from rich.logging import RichHandler + + +@define +class LoggingConfig: + logger_name: str = field(default="griptape", kw_only=True) + + def __attrs_post_init__(self) -> None: + logger = logging.getLogger(self.logger_name) + logger.setLevel(logging.INFO) + logger.propagate = False + logger.addHandler(RichHandler(show_time=True, show_path=False)) diff --git a/griptape/configs/logging/newline_logging_filter.py b/griptape/configs/logging/newline_logging_filter.py new file mode 100644 index 000000000..bae08265f --- /dev/null +++ b/griptape/configs/logging/newline_logging_filter.py @@ -0,0 +1,13 @@ +import logging +from typing import Any + +from attrs import define, field + + +@define +class NewlineLoggingFilter(logging.Filter): + replace_str: str = field(default=" ", kw_only=True) + + def filter(self, record: Any) -> bool: + record.msg = record.msg.replace("\n", self.replace_str) + return True diff --git a/griptape/configs/logging/truncate_logging_filter.py b/griptape/configs/logging/truncate_logging_filter.py new file mode 100644 index 000000000..9888fc169 --- /dev/null +++ b/griptape/configs/logging/truncate_logging_filter.py @@ -0,0 +1,17 @@ +import logging +from typing import Any + +from attrs import define, field + + +@define +class TruncateLoggingFilter(logging.Filter): + max_log_length: int = field(default=1000, kw_only=True) + + def filter(self, record: Any) -> bool: + message = record.getMessage() + + if len(message) > self.max_log_length: + record.msg = f"{message[:self.max_log_length]}... 
[{len(message) - self.max_log_length} more characters]" + record.args = () + return True diff --git a/griptape/drivers/__init__.py b/griptape/drivers/__init__.py index f948f1be1..f19ec7d10 100644 --- a/griptape/drivers/__init__.py +++ b/griptape/drivers/__init__.py @@ -15,6 +15,7 @@ from .memory.conversation.local_conversation_memory_driver import LocalConversationMemoryDriver from .memory.conversation.amazon_dynamodb_conversation_memory_driver import AmazonDynamoDbConversationMemoryDriver from .memory.conversation.redis_conversation_memory_driver import RedisConversationMemoryDriver +from .memory.conversation.griptape_cloud_conversation_memory_driver import GriptapeCloudConversationMemoryDriver from .embedding.base_embedding_driver import BaseEmbeddingDriver from .embedding.openai_embedding_driver import OpenAiEmbeddingDriver @@ -41,6 +42,7 @@ from .vector.azure_mongodb_vector_store_driver import AzureMongoDbVectorStoreDriver from .vector.dummy_vector_store_driver import DummyVectorStoreDriver from .vector.qdrant_vector_store_driver import QdrantVectorStoreDriver +from .vector.astradb_vector_store_driver import AstraDbVectorStoreDriver from .vector.griptape_cloud_knowledge_base_vector_store_driver import GriptapeCloudKnowledgeBaseVectorStoreDriver from .sql.base_sql_driver import BaseSqlDriver @@ -148,6 +150,7 @@ "LocalConversationMemoryDriver", "AmazonDynamoDbConversationMemoryDriver", "RedisConversationMemoryDriver", + "GriptapeCloudConversationMemoryDriver", "BaseEmbeddingDriver", "OpenAiEmbeddingDriver", "AzureOpenAiEmbeddingDriver", @@ -171,6 +174,7 @@ "AmazonOpenSearchVectorStoreDriver", "PgVectorVectorStoreDriver", "QdrantVectorStoreDriver", + "AstraDbVectorStoreDriver", "DummyVectorStoreDriver", "GriptapeCloudKnowledgeBaseVectorStoreDriver", "BaseSqlDriver", diff --git a/griptape/drivers/audio_transcription/base_audio_transcription_driver.py b/griptape/drivers/audio_transcription/base_audio_transcription_driver.py index c81ea1d5b..ae46c474c 100644 --- a/griptape/drivers/audio_transcription/base_audio_transcription_driver.py +++ b/griptape/drivers/audio_transcription/base_audio_transcription_driver.py @@ -5,22 +5,22 @@ from attrs import define, field -from griptape.events import FinishAudioTranscriptionEvent, StartAudioTranscriptionEvent -from griptape.mixins import EventPublisherMixin, ExponentialBackoffMixin, SerializableMixin +from griptape.events import EventBus, FinishAudioTranscriptionEvent, StartAudioTranscriptionEvent +from griptape.mixins import ExponentialBackoffMixin, SerializableMixin if TYPE_CHECKING: from griptape.artifacts import AudioArtifact, TextArtifact @define -class BaseAudioTranscriptionDriver(EventPublisherMixin, SerializableMixin, ExponentialBackoffMixin, ABC): +class BaseAudioTranscriptionDriver(SerializableMixin, ExponentialBackoffMixin, ABC): model: str = field(kw_only=True, metadata={"serializable": True}) def before_run(self) -> None: - self.publish_event(StartAudioTranscriptionEvent()) + EventBus.publish_event(StartAudioTranscriptionEvent()) def after_run(self) -> None: - self.publish_event(FinishAudioTranscriptionEvent()) + EventBus.publish_event(FinishAudioTranscriptionEvent()) def run(self, audio: AudioArtifact, prompts: Optional[list[str]] = None) -> TextArtifact: for attempt in self.retrying(): diff --git a/griptape/drivers/audio_transcription/openai_audio_transcription_driver.py b/griptape/drivers/audio_transcription/openai_audio_transcription_driver.py index 9240c3a4f..312fa8318 100644 --- 
a/griptape/drivers/audio_transcription/openai_audio_transcription_driver.py +++ b/griptape/drivers/audio_transcription/openai_audio_transcription_driver.py @@ -12,7 +12,7 @@ @define class OpenAiAudioTranscriptionDriver(BaseAudioTranscriptionDriver): - api_type: str = field(default=openai.api_type, kw_only=True) + api_type: Optional[str] = field(default=openai.api_type, kw_only=True) api_version: Optional[str] = field(default=openai.api_version, kw_only=True, metadata={"serializable": True}) base_url: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) api_key: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": False}) diff --git a/griptape/drivers/embedding/base_embedding_driver.py b/griptape/drivers/embedding/base_embedding_driver.py index 690726060..8998f00e5 100644 --- a/griptape/drivers/embedding/base_embedding_driver.py +++ b/griptape/drivers/embedding/base_embedding_driver.py @@ -7,7 +7,7 @@ from attrs import define, field from griptape.chunkers import BaseChunker, TextChunker -from griptape.mixins import EventPublisherMixin, ExponentialBackoffMixin, SerializableMixin +from griptape.mixins import ExponentialBackoffMixin, SerializableMixin if TYPE_CHECKING: from griptape.artifacts import TextArtifact @@ -15,7 +15,7 @@ @define -class BaseEmbeddingDriver(EventPublisherMixin, SerializableMixin, ExponentialBackoffMixin, ABC): +class BaseEmbeddingDriver(SerializableMixin, ExponentialBackoffMixin, ABC): """Base Embedding Driver. Attributes: diff --git a/griptape/drivers/event_listener/base_event_listener_driver.py b/griptape/drivers/event_listener/base_event_listener_driver.py index 9f7cb79fb..0af57f0f3 100644 --- a/griptape/drivers/event_listener/base_event_listener_driver.py +++ b/griptape/drivers/event_listener/base_event_listener_driver.py @@ -2,11 +2,12 @@ import logging from abc import ABC, abstractmethod -from concurrent import futures -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING from attrs import Factory, define, field +from griptape.mixins import FuturesExecutorMixin + if TYPE_CHECKING: from griptape.events import BaseEvent @@ -14,11 +15,7 @@ @define -class BaseEventListenerDriver(ABC): - futures_executor_fn: Callable[[], futures.Executor] = field( - default=Factory(lambda: lambda: futures.ThreadPoolExecutor()), - kw_only=True, - ) +class BaseEventListenerDriver(FuturesExecutorMixin, ABC): batched: bool = field(default=True, kw_only=True) batch_size: int = field(default=10, kw_only=True) @@ -29,8 +26,7 @@ def batch(self) -> list[dict]: return self._batch def publish_event(self, event: BaseEvent | dict, *, flush: bool = False) -> None: - with self.futures_executor_fn() as executor: - executor.submit(self._safe_try_publish_event, event, flush=flush) + self.futures_executor.submit(self._safe_try_publish_event, event, flush=flush) @abstractmethod def try_publish_event_payload(self, event_payload: dict) -> None: ... 
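The BaseEventListenerDriver change above swaps the per-call ThreadPoolExecutor for the shared executor supplied by FuturesExecutorMixin, so publish_event() now only submits work instead of creating and tearing down a pool on every call. A rough sketch of a subclass exercising that path; the driver is hypothetical, and try_publish_event_payload_batch is assumed to be the other abstract hook since it is not shown in this hunk.

from griptape.drivers import BaseEventListenerDriver

class StdoutEventListenerDriver(BaseEventListenerDriver):
    # Hypothetical driver that simply prints event payloads.
    def try_publish_event_payload(self, event_payload: dict) -> None:
        print(event_payload)

    def try_publish_event_payload_batch(self, event_payload_batch: list[dict]) -> None:
        print(event_payload_batch)

driver = StdoutEventListenerDriver(batched=False)
# The call below is submitted to the mixin-provided shared executor rather than a fresh pool.
driver.publish_event({"type": "StartPromptEvent"})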
diff --git a/griptape/drivers/event_listener/griptape_cloud_event_listener_driver.py b/griptape/drivers/event_listener/griptape_cloud_event_listener_driver.py index 733f0baa2..98f52b914 100644 --- a/griptape/drivers/event_listener/griptape_cloud_event_listener_driver.py +++ b/griptape/drivers/event_listener/griptape_cloud_event_listener_driver.py @@ -1,6 +1,7 @@ from __future__ import annotations import os +from typing import Optional from urllib.parse import urljoin import requests @@ -25,12 +26,14 @@ class GriptapeCloudEventListenerDriver(BaseEventListenerDriver): default=Factory(lambda: os.getenv("GT_CLOUD_BASE_URL", "https://cloud.griptape.ai")), kw_only=True, ) - api_key: str = field(default=Factory(lambda: os.getenv("GT_CLOUD_API_KEY")), kw_only=True) + api_key: Optional[str] = field(default=Factory(lambda: os.getenv("GT_CLOUD_API_KEY")), kw_only=True) headers: dict = field( default=Factory(lambda self: {"Authorization": f"Bearer {self.api_key}"}, takes_self=True), kw_only=True, ) - structure_run_id: str = field(default=Factory(lambda: os.getenv("GT_CLOUD_STRUCTURE_RUN_ID")), kw_only=True) + structure_run_id: Optional[str] = field( + default=Factory(lambda: os.getenv("GT_CLOUD_STRUCTURE_RUN_ID")), kw_only=True + ) @structure_run_id.validator # pyright: ignore[reportAttributeAccessIssue] def validate_run_id(self, _: Attribute, structure_run_id: str) -> None: diff --git a/griptape/drivers/file_manager/amazon_s3_file_manager_driver.py b/griptape/drivers/file_manager/amazon_s3_file_manager_driver.py index e58e46d37..20e432c0b 100644 --- a/griptape/drivers/file_manager/amazon_s3_file_manager_driver.py +++ b/griptape/drivers/file_manager/amazon_s3_file_manager_driver.py @@ -40,8 +40,7 @@ def try_list_files(self, path: str) -> list[str]: if len(files_and_dirs) == 0: if len(self._list_files_and_dirs(full_key.rstrip("/"), max_items=1)) > 0: raise NotADirectoryError - else: - raise FileNotFoundError + raise FileNotFoundError return files_and_dirs def try_load_file(self, path: str) -> bytes: @@ -57,8 +56,7 @@ def try_load_file(self, path: str) -> bytes: except botocore.exceptions.ClientError as e: if e.response["Error"]["Code"] in {"NoSuchKey", "404"}: raise FileNotFoundError from e - else: - raise e + raise e def try_save_file(self, path: str, value: bytes) -> None: full_key = self._to_full_key(path) @@ -141,5 +139,4 @@ def _normpath(self, path: str) -> str: else: stack.append(part) - normalized_path = "/".join(stack) - return normalized_path + return "/".join(stack) diff --git a/griptape/drivers/image_generation/base_image_generation_driver.py b/griptape/drivers/image_generation/base_image_generation_driver.py index f500d6d09..8dfca5945 100644 --- a/griptape/drivers/image_generation/base_image_generation_driver.py +++ b/griptape/drivers/image_generation/base_image_generation_driver.py @@ -5,22 +5,22 @@ from attrs import define, field -from griptape.events import FinishImageGenerationEvent, StartImageGenerationEvent -from griptape.mixins import EventPublisherMixin, ExponentialBackoffMixin, SerializableMixin +from griptape.events import EventBus, FinishImageGenerationEvent, StartImageGenerationEvent +from griptape.mixins import ExponentialBackoffMixin, SerializableMixin if TYPE_CHECKING: from griptape.artifacts import ImageArtifact @define -class BaseImageGenerationDriver(EventPublisherMixin, SerializableMixin, ExponentialBackoffMixin, ABC): +class BaseImageGenerationDriver(SerializableMixin, ExponentialBackoffMixin, ABC): model: str = field(kw_only=True, metadata={"serializable": True}) def 
before_run(self, prompts: list[str], negative_prompts: Optional[list[str]] = None) -> None: - self.publish_event(StartImageGenerationEvent(prompts=prompts, negative_prompts=negative_prompts)) + EventBus.publish_event(StartImageGenerationEvent(prompts=prompts, negative_prompts=negative_prompts)) def after_run(self) -> None: - self.publish_event(FinishImageGenerationEvent()) + EventBus.publish_event(FinishImageGenerationEvent()) def run_text_to_image(self, prompts: list[str], negative_prompts: Optional[list[str]] = None) -> ImageArtifact: for attempt in self.retrying(): diff --git a/griptape/drivers/image_generation/openai_image_generation_driver.py b/griptape/drivers/image_generation/openai_image_generation_driver.py index 54eab48ec..0ee50a1e2 100644 --- a/griptape/drivers/image_generation/openai_image_generation_driver.py +++ b/griptape/drivers/image_generation/openai_image_generation_driver.py @@ -33,7 +33,7 @@ class OpenAiImageGenerationDriver(BaseImageGenerationDriver): a base64 encoded image in a JSON object. """ - api_type: str = field(default=openai.api_type, kw_only=True) + api_type: Optional[str] = field(default=openai.api_type, kw_only=True) api_version: Optional[str] = field(default=openai.api_version, kw_only=True, metadata={"serializable": True}) base_url: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) api_key: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": False}) diff --git a/griptape/drivers/image_generation_model/bedrock_stable_diffusion_image_generation_model_driver.py b/griptape/drivers/image_generation_model/bedrock_stable_diffusion_image_generation_model_driver.py index 92428e157..0ec7d03d2 100644 --- a/griptape/drivers/image_generation_model/bedrock_stable_diffusion_image_generation_model_driver.py +++ b/griptape/drivers/image_generation_model/bedrock_stable_diffusion_image_generation_model_driver.py @@ -139,9 +139,7 @@ def _request_parameters( request["mask_source"] = mask_source request["mask_image"] = mask.base64 - request = {k: v for k, v in request.items() if v is not None} - - return request + return {k: v for k, v in request.items() if v is not None} def get_generated_image(self, response: dict) -> bytes: image_response = response["artifacts"][0] @@ -149,7 +147,7 @@ def get_generated_image(self, response: dict) -> bytes: # finishReason may be SUCCESS, CONTENT_FILTERED, or ERROR. 
if image_response.get("finishReason") == "ERROR": raise Exception(f"Image generation failed: {image_response.get('finishReason')}") - elif image_response.get("finishReason") == "CONTENT_FILTERED": + if image_response.get("finishReason") == "CONTENT_FILTERED": logging.warning("Image generation triggered content filter and may be blurred") return base64.decodebytes(bytes(image_response.get("base64"), "utf-8")) diff --git a/griptape/drivers/image_query/anthropic_image_query_driver.py b/griptape/drivers/image_query/anthropic_image_query_driver.py index bd19862ec..a50685724 100644 --- a/griptape/drivers/image_query/anthropic_image_query_driver.py +++ b/griptape/drivers/image_query/anthropic_image_query_driver.py @@ -47,9 +47,7 @@ def _base_params(self, text_query: str, images: list[ImageArtifact]) -> dict: content = [self._construct_image_message(image) for image in images] content.append(self._construct_text_message(text_query)) messages = self._construct_messages(content) - params = {"model": self.model, "messages": messages, "max_tokens": self.max_tokens} - - return params + return {"model": self.model, "messages": messages, "max_tokens": self.max_tokens} def _construct_image_message(self, image_data: ImageArtifact) -> dict: data = image_data.base64 diff --git a/griptape/drivers/image_query/base_image_query_driver.py b/griptape/drivers/image_query/base_image_query_driver.py index b39f198d4..28c571328 100644 --- a/griptape/drivers/image_query/base_image_query_driver.py +++ b/griptape/drivers/image_query/base_image_query_driver.py @@ -5,24 +5,24 @@ from attrs import define, field -from griptape.events import FinishImageQueryEvent, StartImageQueryEvent -from griptape.mixins import EventPublisherMixin, ExponentialBackoffMixin, SerializableMixin +from griptape.events import EventBus, FinishImageQueryEvent, StartImageQueryEvent +from griptape.mixins import ExponentialBackoffMixin, SerializableMixin if TYPE_CHECKING: from griptape.artifacts import ImageArtifact, TextArtifact @define -class BaseImageQueryDriver(EventPublisherMixin, SerializableMixin, ExponentialBackoffMixin, ABC): +class BaseImageQueryDriver(SerializableMixin, ExponentialBackoffMixin, ABC): max_tokens: int = field(default=256, kw_only=True, metadata={"serializable": True}) def before_run(self, query: str, images: list[ImageArtifact]) -> None: - self.publish_event( + EventBus.publish_event( StartImageQueryEvent(query=query, images_info=[image.to_text() for image in images]), ) def after_run(self, result: str) -> None: - self.publish_event(FinishImageQueryEvent(result=result)) + EventBus.publish_event(FinishImageQueryEvent(result=result)) def query(self, query: str, images: list[ImageArtifact]) -> TextArtifact: for attempt in self.retrying(): diff --git a/griptape/drivers/image_query/openai_image_query_driver.py b/griptape/drivers/image_query/openai_image_query_driver.py index b607c97f5..6399efa95 100644 --- a/griptape/drivers/image_query/openai_image_query_driver.py +++ b/griptape/drivers/image_query/openai_image_query_driver.py @@ -18,7 +18,7 @@ @define class OpenAiImageQueryDriver(BaseImageQueryDriver): model: str = field(kw_only=True, metadata={"serializable": True}) - api_type: str = field(default=openai.api_type, kw_only=True) + api_type: Optional[str] = field(default=openai.api_type, kw_only=True) api_version: Optional[str] = field(default=openai.api_version, kw_only=True, metadata={"serializable": True}) base_url: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) api_key: Optional[str] = 
field(default=None, kw_only=True) diff --git a/griptape/drivers/image_query_model/bedrock_claude_image_query_model_driver.py b/griptape/drivers/image_query_model/bedrock_claude_image_query_model_driver.py index 8260ce3d5..1785550a0 100644 --- a/griptape/drivers/image_query_model/bedrock_claude_image_query_model_driver.py +++ b/griptape/drivers/image_query_model/bedrock_claude_image_query_model_driver.py @@ -14,9 +14,7 @@ def image_query_request_parameters(self, query: str, images: list[ImageArtifact] content = [self._construct_image_message(image) for image in images] content.append(self._construct_text_message(query)) messages = self._construct_messages(content) - input_params = {"messages": messages, "anthropic_version": self.ANTHROPIC_VERSION, "max_tokens": max_tokens} - - return input_params + return {"messages": messages, "anthropic_version": self.ANTHROPIC_VERSION, "max_tokens": max_tokens} def process_output(self, output: dict) -> TextArtifact: content_blocks = output["content"] diff --git a/griptape/drivers/memory/conversation/amazon_dynamodb_conversation_memory_driver.py b/griptape/drivers/memory/conversation/amazon_dynamodb_conversation_memory_driver.py index e52174c28..b0c2485d6 100644 --- a/griptape/drivers/memory/conversation/amazon_dynamodb_conversation_memory_driver.py +++ b/griptape/drivers/memory/conversation/amazon_dynamodb_conversation_memory_driver.py @@ -1,16 +1,18 @@ from __future__ import annotations +import json from typing import TYPE_CHECKING, Any, Optional from attrs import Factory, define, field from griptape.drivers import BaseConversationMemoryDriver -from griptape.memory.structure import BaseConversationMemory from griptape.utils import import_optional_dependency if TYPE_CHECKING: import boto3 + from griptape.memory.structure import BaseConversationMemory + @define class AmazonDynamoDbConversationMemoryDriver(BaseConversationMemoryDriver): @@ -38,12 +40,16 @@ def store(self, memory: BaseConversationMemory) -> None: ) def load(self) -> Optional[BaseConversationMemory]: + from griptape.memory.structure import BaseConversationMemory + response = self.table.get_item(Key=self._get_key()) if "Item" in response and self.value_attribute_key in response["Item"]: - memory_value = response["Item"][self.value_attribute_key] + memory_dict = json.loads(response["Item"][self.value_attribute_key]) + # needed to avoid recursive method calls + memory_dict["autoload"] = False - memory = BaseConversationMemory.from_json(memory_value) + memory = BaseConversationMemory.from_dict(memory_dict) memory.driver = self diff --git a/griptape/drivers/memory/conversation/base_conversation_memory_driver.py b/griptape/drivers/memory/conversation/base_conversation_memory_driver.py index f13b82c29..1caeb902f 100644 --- a/griptape/drivers/memory/conversation/base_conversation_memory_driver.py +++ b/griptape/drivers/memory/conversation/base_conversation_memory_driver.py @@ -3,13 +3,13 @@ from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Optional -from griptape.mixins import EventPublisherMixin, SerializableMixin +from griptape.mixins import SerializableMixin if TYPE_CHECKING: from griptape.memory.structure import BaseConversationMemory -class BaseConversationMemoryDriver(EventPublisherMixin, SerializableMixin, ABC): +class BaseConversationMemoryDriver(SerializableMixin, ABC): @abstractmethod def store(self, memory: BaseConversationMemory) -> None: ... 
diff --git a/griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.py b/griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.py
new file mode 100644
index 000000000..2ea1d0d1a
--- /dev/null
+++ b/griptape/drivers/memory/conversation/griptape_cloud_conversation_memory_driver.py
@@ -0,0 +1,124 @@
+from __future__ import annotations
+
+import os
+import uuid
+from typing import TYPE_CHECKING, Optional
+from urllib.parse import urljoin
+
+import requests
+from attrs import Attribute, Factory, define, field
+
+from griptape.artifacts import BaseArtifact
+from griptape.drivers import BaseConversationMemoryDriver
+
+if TYPE_CHECKING:
+    from griptape.memory.structure import BaseConversationMemory
+
+
+@define(kw_only=True)
+class GriptapeCloudConversationMemoryDriver(BaseConversationMemoryDriver):
+    """A driver for storing conversation memory in the Griptape Cloud.
+
+    Attributes:
+        thread_id: The ID of the Thread to store the conversation memory in. If not provided, the driver will attempt to
+            retrieve the ID from the environment variable `GT_CLOUD_THREAD_ID`. If that is not set, a new Thread will be
+            created.
+        base_url: The base URL of the Griptape Cloud API. Defaults to the value of the environment variable
+            `GT_CLOUD_BASE_URL` or `https://cloud.griptape.ai`.
+        api_key: The API key to use for authenticating with the Griptape Cloud API. If not provided, the driver will
+            attempt to retrieve the API key from the environment variable `GT_CLOUD_API_KEY`.
+
+    Raises:
+        ValueError: If `api_key` is not provided.
+    """
+
+    thread_id: str = field(
+        default=None,
+        metadata={"serializable": True},
+    )
+    base_url: str = field(
+        default=Factory(lambda: os.getenv("GT_CLOUD_BASE_URL", "https://cloud.griptape.ai")),
+    )
+    api_key: Optional[str] = field(default=Factory(lambda: os.getenv("GT_CLOUD_API_KEY")))
+    headers: dict = field(
+        default=Factory(lambda self: {"Authorization": f"Bearer {self.api_key}"}, takes_self=True),
+        init=False,
+    )
+
+    def __attrs_post_init__(self) -> None:
+        if self.thread_id is None:
+            self.thread_id = os.getenv("GT_CLOUD_THREAD_ID", self._get_thread_id())
+
+    @api_key.validator  # pyright: ignore[reportAttributeAccessIssue]
+    def validate_api_key(self, _: Attribute, value: Optional[str]) -> str:
+        if value is None:
+            raise ValueError(f"{self.__class__.__name__} requires an API key")
+        return value
+
+    def store(self, memory: BaseConversationMemory) -> None:
+        # serialize the run artifacts to json strings
+        messages = [{"input": run.input.to_json(), "output": run.output.to_json()} for run in memory.runs]
+
+        # serialize the metadata to a json string
+        # remove runs because they are already stored as Messages
+        metadata = memory.to_dict()
+        del metadata["runs"]
+
+        # patch the Thread with the new messages and metadata
+        # all old Messages are replaced with the new ones
+        response = requests.patch(
+            self._get_url(f"/threads/{self.thread_id}"),
+            json={"messages": messages, "metadata": metadata},
+            headers=self.headers,
+        )
+        response.raise_for_status()
+
+    def load(self) -> BaseConversationMemory:
+        from griptape.memory.structure import BaseConversationMemory, ConversationMemory, Run
+
+        # get the Messages from the Thread
+        messages_response = requests.get(self._get_url(f"/threads/{self.thread_id}/messages"), headers=self.headers)
+        messages_response.raise_for_status()
+        messages_response = messages_response.json()
+
+        # retrieve the Thread to get the metadata
+        thread_response = 
requests.get(self._get_url(f"/threads/{self.thread_id}"), headers=self.headers) + thread_response.raise_for_status() + thread_response = thread_response.json() + + messages = messages_response.get("messages", []) + + runs = [ + Run( + id=m["message_id"], + input=BaseArtifact.from_json(m["input"]), + output=BaseArtifact.from_json(m["output"]), + ) + for m in messages + ] + metadata = thread_response.get("metadata") + + # the metadata will contain the serialized + # ConversationMemory object with the runs removed + # autoload=False to prevent recursively loading the memory + if metadata is not None and metadata != {}: + memory = BaseConversationMemory.from_dict( + { + **metadata, + "runs": [run.to_dict() for run in runs], + "autoload": False, + } + ) + memory.driver = self + return memory + # no metadata found, return a new ConversationMemory object + return ConversationMemory(runs=runs, autoload=False, driver=self) + + def _get_thread_id(self) -> str: + res = requests.post(self._get_url("/threads"), json={"name": uuid.uuid4().hex}, headers=self.headers) + res.raise_for_status() + return res.json().get("thread_id") + + def _get_url(self, path: str) -> str: + path = path.lstrip("/") + return urljoin(self.base_url, f"/api/{path}") diff --git a/griptape/drivers/memory/conversation/local_conversation_memory_driver.py b/griptape/drivers/memory/conversation/local_conversation_memory_driver.py index 8d6399e13..9a79accc3 100644 --- a/griptape/drivers/memory/conversation/local_conversation_memory_driver.py +++ b/griptape/drivers/memory/conversation/local_conversation_memory_driver.py @@ -1,13 +1,16 @@ from __future__ import annotations +import json import os from pathlib import Path -from typing import Optional +from typing import TYPE_CHECKING, Optional from attrs import define, field from griptape.drivers import BaseConversationMemoryDriver -from griptape.memory.structure import BaseConversationMemory + +if TYPE_CHECKING: + from griptape.memory.structure import BaseConversationMemory @define @@ -18,9 +21,15 @@ def store(self, memory: BaseConversationMemory) -> None: Path(self.file_path).write_text(memory.to_json()) def load(self) -> Optional[BaseConversationMemory]: + from griptape.memory.structure import BaseConversationMemory + if not os.path.exists(self.file_path): return None - memory = BaseConversationMemory.from_json(Path(self.file_path).read_text()) + + memory_dict = json.loads(Path(self.file_path).read_text()) + # needed to avoid recursive method calls + memory_dict["autoload"] = False + memory = BaseConversationMemory.from_dict(memory_dict) memory.driver = self diff --git a/griptape/drivers/memory/conversation/redis_conversation_memory_driver.py b/griptape/drivers/memory/conversation/redis_conversation_memory_driver.py index 2ba3737e8..8741cda50 100644 --- a/griptape/drivers/memory/conversation/redis_conversation_memory_driver.py +++ b/griptape/drivers/memory/conversation/redis_conversation_memory_driver.py @@ -1,17 +1,19 @@ from __future__ import annotations +import json import uuid from typing import TYPE_CHECKING, Optional from attrs import Factory, define, field from griptape.drivers import BaseConversationMemoryDriver -from griptape.memory.structure import BaseConversationMemory from griptape.utils.import_utils import import_optional_dependency if TYPE_CHECKING: from redis import Redis + from griptape.memory.structure import BaseConversationMemory + @define class RedisConversationMemoryDriver(BaseConversationMemoryDriver): @@ -54,10 +56,15 @@ def store(self, memory: 
BaseConversationMemory) -> None: self.client.hset(self.index, self.conversation_id, memory.to_json()) def load(self) -> Optional[BaseConversationMemory]: + from griptape.memory.structure import BaseConversationMemory + key = self.index memory_json = self.client.hget(key, self.conversation_id) - if memory_json: - memory = BaseConversationMemory.from_json(memory_json) + if memory_json is not None: + memory_dict = json.loads(memory_json) + # needed to avoid recursive method calls + memory_dict["autoload"] = False + memory = BaseConversationMemory.from_dict(memory_dict) memory.driver = self return memory return None diff --git a/griptape/drivers/observability/griptape_cloud_observability_driver.py b/griptape/drivers/observability/griptape_cloud_observability_driver.py index 6c99bd231..b2f13ba27 100644 --- a/griptape/drivers/observability/griptape_cloud_observability_driver.py +++ b/griptape/drivers/observability/griptape_cloud_observability_driver.py @@ -23,11 +23,13 @@ class GriptapeCloudObservabilityDriver(OpenTelemetryObservabilityDriver): base_url: str = field( default=Factory(lambda: os.getenv("GT_CLOUD_BASE_URL", "https://cloud.griptape.ai")), kw_only=True ) - api_key: str = field(default=Factory(lambda: os.getenv("GT_CLOUD_API_KEY")), kw_only=True) + api_key: Optional[str] = field(default=Factory(lambda: os.getenv("GT_CLOUD_API_KEY")), kw_only=True) headers: dict = field( default=Factory(lambda self: {"Authorization": f"Bearer {self.api_key}"}, takes_self=True), kw_only=True ) - structure_run_id: str = field(default=Factory(lambda: os.getenv("GT_CLOUD_STRUCTURE_RUN_ID")), kw_only=True) + structure_run_id: Optional[str] = field( + default=Factory(lambda: os.getenv("GT_CLOUD_STRUCTURE_RUN_ID")), kw_only=True + ) span_processor: SpanProcessor = field( default=Factory( lambda self: import_optional_dependency("opentelemetry.sdk.trace.export").BatchSpanProcessor( diff --git a/griptape/drivers/prompt/base_prompt_driver.py b/griptape/drivers/prompt/base_prompt_driver.py index e5fd0408d..c07980c9e 100644 --- a/griptape/drivers/prompt/base_prompt_driver.py +++ b/griptape/drivers/prompt/base_prompt_driver.py @@ -16,8 +16,8 @@ TextMessageContent, observable, ) -from griptape.events import CompletionChunkEvent, FinishPromptEvent, StartPromptEvent -from griptape.mixins import EventPublisherMixin, ExponentialBackoffMixin, SerializableMixin +from griptape.events import CompletionChunkEvent, EventBus, FinishPromptEvent, StartPromptEvent +from griptape.mixins import ExponentialBackoffMixin, SerializableMixin if TYPE_CHECKING: from collections.abc import Iterator @@ -26,7 +26,7 @@ @define(kw_only=True) -class BasePromptDriver(SerializableMixin, ExponentialBackoffMixin, EventPublisherMixin, ABC): +class BasePromptDriver(SerializableMixin, ExponentialBackoffMixin, ABC): """Base class for the Prompt Drivers. Attributes: @@ -49,10 +49,10 @@ class BasePromptDriver(SerializableMixin, ExponentialBackoffMixin, EventPublishe use_native_tools: bool = field(default=False, kw_only=True, metadata={"serializable": True}) def before_run(self, prompt_stack: PromptStack) -> None: - self.publish_event(StartPromptEvent(model=self.model, prompt_stack=prompt_stack)) + EventBus.publish_event(StartPromptEvent(model=self.model, prompt_stack=prompt_stack)) def after_run(self, result: Message) -> None: - self.publish_event( + EventBus.publish_event( FinishPromptEvent( model=self.model, result=result.value, @@ -108,9 +108,7 @@ def try_run(self, prompt_stack: PromptStack) -> Message: ... 
def try_stream(self, prompt_stack: PromptStack) -> Iterator[DeltaMessage]: ... def __process_run(self, prompt_stack: PromptStack) -> Message: - result = self.try_run(prompt_stack) - - return result + return self.try_run(prompt_stack) def __process_stream(self, prompt_stack: PromptStack) -> Message: delta_contents: dict[int, list[BaseDeltaMessageContent]] = {} @@ -128,17 +126,15 @@ def __process_stream(self, prompt_stack: PromptStack) -> Message: else: delta_contents[content.index] = [content] if isinstance(content, TextDeltaMessageContent): - self.publish_event(CompletionChunkEvent(token=content.text)) + EventBus.publish_event(CompletionChunkEvent(token=content.text)) elif isinstance(content, ActionCallDeltaMessageContent): if content.tag is not None and content.name is not None and content.path is not None: - self.publish_event(CompletionChunkEvent(token=str(content))) + EventBus.publish_event(CompletionChunkEvent(token=str(content))) elif content.partial_input is not None: - self.publish_event(CompletionChunkEvent(token=content.partial_input)) + EventBus.publish_event(CompletionChunkEvent(token=content.partial_input)) # Build a complete content from the content deltas - result = self.__build_message(list(delta_contents.values()), usage) - - return result + return self.__build_message(list(delta_contents.values()), usage) def __build_message( self, delta_contents: list[list[BaseDeltaMessageContent]], usage: DeltaMessage.Usage @@ -153,10 +149,8 @@ def __build_message( if action_deltas: content.append(ActionCallMessageContent.from_deltas(action_deltas)) - result = Message( + return Message( content=content, role=Message.ASSISTANT_ROLE, usage=Message.Usage(input_tokens=usage.input_tokens, output_tokens=usage.output_tokens), ) - - return result diff --git a/griptape/drivers/prompt/cohere_prompt_driver.py b/griptape/drivers/prompt/cohere_prompt_driver.py index ff1a8b482..05be5b7f2 100644 --- a/griptape/drivers/prompt/cohere_prompt_driver.py +++ b/griptape/drivers/prompt/cohere_prompt_driver.py @@ -1,5 +1,6 @@ from __future__ import annotations +import warnings from typing import TYPE_CHECKING, Any from attrs import Factory, define, field @@ -24,6 +25,9 @@ from griptape.tokenizers import BaseTokenizer, CohereTokenizer from griptape.utils import import_optional_dependency +# TODO Remove once https://github.com/cohere-ai/cohere-python/issues/559 is resolved +warnings.filterwarnings("ignore", module="pydantic") + if TYPE_CHECKING: from collections.abc import Iterator diff --git a/griptape/drivers/prompt/google_prompt_driver.py b/griptape/drivers/prompt/google_prompt_driver.py index 06f9dfbe6..bbba4e0f9 100644 --- a/griptape/drivers/prompt/google_prompt_driver.py +++ b/griptape/drivers/prompt/google_prompt_driver.py @@ -155,7 +155,7 @@ def _default_model_client(self) -> GenerativeModel: def __to_google_messages(self, prompt_stack: PromptStack) -> ContentsType: types = import_optional_dependency("google.generativeai.types") - inputs = [ + return [ types.ContentDict( { "role": self.__to_google_role(message), @@ -166,8 +166,6 @@ def __to_google_messages(self, prompt_stack: PromptStack) -> ContentsType: if not message.is_system() ] - return inputs - def __to_google_role(self, message: Message) -> str: if message.is_assistant(): return "model" @@ -227,6 +225,8 @@ def __to_google_message_content(self, content: BaseMessageContent) -> ContentDic raise ValueError(f"Unsupported prompt stack content type: {type(content)}") def __to_prompt_stack_message_content(self, content: Part) -> BaseMessageContent: + 
json_format = import_optional_dependency("google.protobuf.json_format") + if content.text: return TextMessageContent(TextArtifact(content.text)) elif content.function_call: @@ -234,7 +234,7 @@ def __to_prompt_stack_message_content(self, content: Part) -> BaseMessageContent name, path = ToolAction.from_native_tool_name(function_call.name) - args = dict(function_call.args.items()) + args = json_format.MessageToDict(function_call._pb).get("args", {}) return ActionCallMessageContent( artifact=ActionArtifact(value=ToolAction(tag=function_call.name, name=name, path=path, input=args)), ) @@ -242,6 +242,8 @@ def __to_prompt_stack_message_content(self, content: Part) -> BaseMessageContent raise ValueError(f"Unsupported message content type {content}") def __to_prompt_stack_delta_message_content(self, content: Part) -> BaseDeltaMessageContent: + json_format = import_optional_dependency("google.protobuf.json_format") + if content.text: return TextDeltaMessageContent(content.text) elif content.function_call: @@ -249,7 +251,7 @@ def __to_prompt_stack_delta_message_content(self, content: Part) -> BaseDeltaMes name, path = ToolAction.from_native_tool_name(function_call.name) - args = dict(function_call.args.items()) + args = json_format.MessageToDict(function_call._pb).get("args", {}) return ActionCallDeltaMessageContent( tag=function_call.name, name=name, diff --git a/griptape/drivers/sql/amazon_redshift_sql_driver.py b/griptape/drivers/sql/amazon_redshift_sql_driver.py index 5ae85c495..837405e83 100644 --- a/griptape/drivers/sql/amazon_redshift_sql_driver.py +++ b/griptape/drivers/sql/amazon_redshift_sql_driver.py @@ -29,7 +29,7 @@ class AmazonRedshiftSqlDriver(BaseSqlDriver): def validate_params(self, _: Attribute, workgroup_name: Optional[str]) -> None: if not self.cluster_identifier and not self.workgroup_name: raise ValueError("Provide a value for one of `cluster_identifier` or `workgroup_name`") - elif self.cluster_identifier and self.workgroup_name: + if self.cluster_identifier and self.workgroup_name: raise ValueError("Provide a value for either `cluster_identifier` or `workgroup_name`, but not both") @classmethod @@ -92,6 +92,7 @@ def execute_query_raw(self, query: str) -> Optional[list[dict[str, Optional[Any] elif statement["Status"] in ["FAILED", "ABORTED"]: return None + return None def get_table_schema(self, table_name: str, schema: Optional[str] = None) -> Optional[str]: function_kwargs = {"Database": self.database, "Table": table_name} diff --git a/griptape/drivers/sql/sql_driver.py b/griptape/drivers/sql/sql_driver.py index 0e3d1d4b7..d2293f94d 100644 --- a/griptape/drivers/sql/sql_driver.py +++ b/griptape/drivers/sql/sql_driver.py @@ -41,6 +41,7 @@ def execute_query_raw(self, query: str) -> Optional[list[dict[str, Optional[Any] return [dict(result._mapping) for result in results] else: con.commit() + return None else: raise ValueError("No result found") diff --git a/griptape/drivers/structure_run/griptape_cloud_structure_run_driver.py b/griptape/drivers/structure_run/griptape_cloud_structure_run_driver.py index 00b90a819..305d14995 100644 --- a/griptape/drivers/structure_run/griptape_cloud_structure_run_driver.py +++ b/griptape/drivers/structure_run/griptape_cloud_structure_run_driver.py @@ -6,7 +6,7 @@ from attrs import Factory, define, field -from griptape.artifacts import BaseArtifact, ErrorArtifact, InfoArtifact, TextArtifact +from griptape.artifacts import BaseArtifact, ErrorArtifact, InfoArtifact from griptape.drivers.structure_run.base_structure_run_driver import 
BaseStructureRunDriver @@ -44,7 +44,7 @@ def try_run(self, *args: BaseArtifact) -> BaseArtifact: except (exceptions.RequestException, HTTPError) as err: return ErrorArtifact(str(err)) - def _get_structure_run_result(self, structure_run_id: str) -> InfoArtifact | TextArtifact | ErrorArtifact: + def _get_structure_run_result(self, structure_run_id: str) -> InfoArtifact | BaseArtifact | ErrorArtifact: url = urljoin(self.base_url.strip("/"), f"/api/structure-runs/{structure_run_id}") result = self._get_structure_run_result_attempt(url) @@ -67,7 +67,7 @@ def _get_structure_run_result(self, structure_run_id: str) -> InfoArtifact | Tex return ErrorArtifact(result) if "output" in result: - return TextArtifact.from_dict(result["output"]) + return BaseArtifact.from_dict(result["output"]) else: return InfoArtifact("No output found in response") diff --git a/griptape/drivers/text_to_speech/base_text_to_speech_driver.py b/griptape/drivers/text_to_speech/base_text_to_speech_driver.py index 788d92974..cb11cc498 100644 --- a/griptape/drivers/text_to_speech/base_text_to_speech_driver.py +++ b/griptape/drivers/text_to_speech/base_text_to_speech_driver.py @@ -5,23 +5,24 @@ from attrs import define, field +from griptape.events import EventBus from griptape.events.finish_text_to_speech_event import FinishTextToSpeechEvent from griptape.events.start_text_to_speech_event import StartTextToSpeechEvent -from griptape.mixins import EventPublisherMixin, ExponentialBackoffMixin, SerializableMixin +from griptape.mixins import ExponentialBackoffMixin, SerializableMixin if TYPE_CHECKING: from griptape.artifacts.audio_artifact import AudioArtifact @define -class BaseTextToSpeechDriver(SerializableMixin, ExponentialBackoffMixin, EventPublisherMixin, ABC): +class BaseTextToSpeechDriver(SerializableMixin, ExponentialBackoffMixin, ABC): model: str = field(kw_only=True, metadata={"serializable": True}) def before_run(self, prompts: list[str]) -> None: - self.publish_event(StartTextToSpeechEvent(prompts=prompts)) + EventBus.publish_event(StartTextToSpeechEvent(prompts=prompts)) def after_run(self) -> None: - self.publish_event(FinishTextToSpeechEvent()) + EventBus.publish_event(FinishTextToSpeechEvent()) def run_text_to_audio(self, prompts: list[str]) -> AudioArtifact: for attempt in self.retrying(): diff --git a/griptape/drivers/text_to_speech/openai_text_to_speech_driver.py b/griptape/drivers/text_to_speech/openai_text_to_speech_driver.py index cb0c5340d..543ef1ec7 100644 --- a/griptape/drivers/text_to_speech/openai_text_to_speech_driver.py +++ b/griptape/drivers/text_to_speech/openai_text_to_speech_driver.py @@ -18,7 +18,7 @@ class OpenAiTextToSpeechDriver(BaseTextToSpeechDriver): metadata={"serializable": True}, ) format: Literal["mp3", "opus", "aac", "flac"] = field(default="mp3", kw_only=True, metadata={"serializable": True}) - api_type: str = field(default=openai.api_type, kw_only=True) + api_type: Optional[str] = field(default=openai.api_type, kw_only=True) api_version: Optional[str] = field(default=openai.api_version, kw_only=True, metadata={"serializable": True}) base_url: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) api_key: Optional[str] = field(default=None, kw_only=True) diff --git a/griptape/drivers/vector/astradb_vector_store_driver.py b/griptape/drivers/vector/astradb_vector_store_driver.py new file mode 100644 index 000000000..029fa382d --- /dev/null +++ b/griptape/drivers/vector/astradb_vector_store_driver.py @@ -0,0 +1,184 @@ +from __future__ import annotations + +from 
typing import TYPE_CHECKING, Any, Optional + +from attrs import define, field + +from griptape.drivers import BaseVectorStoreDriver +from griptape.utils import import_optional_dependency + +if TYPE_CHECKING: + from astrapy import Collection + from astrapy.authentication import TokenProvider + + +@define +class AstraDbVectorStoreDriver(BaseVectorStoreDriver): + """A Vector Store Driver for Astra DB. + + Attributes: + embedding_driver: a `griptape.drivers.BaseEmbeddingDriver` for embedding computations within the store + api_endpoint: the "API Endpoint" for the Astra DB instance. + token: a Database Token ("AstraCS:...") secret to access Astra DB. An instance of `astrapy.authentication.TokenProvider` is also accepted. + collection_name: the name of the collection on Astra DB. The collection must have been created beforehand, + and support vectors with a vector dimension matching the embeddings being used by this driver. + environment: the environment ("prod", "hcd", ...) hosting the target Data API. + It can be omitted for production Astra DB targets. See `astrapy.constants.Environment` for allowed values. + astra_db_namespace: optional specification of the namespace (in the Astra database) for the data. + *Note*: not to be confused with the "namespace" mentioned elsewhere, which is a grouping within this vector store. + """ + + api_endpoint: str = field(kw_only=True, metadata={"serializable": True}) + token: Optional[str | TokenProvider] = field(kw_only=True, default=None, metadata={"serializable": False}) + collection_name: str = field(kw_only=True, metadata={"serializable": True}) + environment: Optional[str] = field(kw_only=True, default=None, metadata={"serializable": True}) + astra_db_namespace: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) + + collection: Collection = field(init=False) + + def __attrs_post_init__(self) -> None: + astrapy = import_optional_dependency("astrapy") + self.collection = ( + astrapy.DataAPIClient( + caller_name="griptape", + environment=self.environment, + ) + .get_database( + self.api_endpoint, + token=self.token, + namespace=self.astra_db_namespace, + ) + .get_collection( + name=self.collection_name, + ) + ) + + def delete_vector(self, vector_id: str) -> None: + """Delete a vector from Astra DB store. + + The method succeeds regardless of whether a vector with the provided ID + was actually stored or not in the first place. + + Args: + vector_id: ID of the vector to delete. + """ + self.collection.delete_one({"_id": vector_id}) + + def upsert_vector( + self, + vector: list[float], + *, + vector_id: Optional[str] = None, + namespace: Optional[str] = None, + meta: Optional[dict] = None, + **kwargs: Any, + ) -> str: + """Write a vector to the Astra DB store. + + In case the provided ID exists already, an overwrite will take place. + + Args: + vector: the vector to be upserted. + vector_id: the ID for the vector to store. If omitted, a server-provided new ID will be employed. + namespace: a namespace (a grouping within the vector store) to assign the vector to. + meta: a metadata dictionary associated to the vector. + kwargs: additional keyword arguments. Currently none is used: if they are passed, they will be ignored with a warning. + + Returns: + the ID of the written vector (str). 
+ """ + document = { + k: v + for k, v in {"$vector": vector, "_id": vector_id, "namespace": namespace, "meta": meta}.items() + if v is not None + } + if vector_id is not None: + self.collection.find_one_and_replace({"_id": vector_id}, document, upsert=True) + return vector_id + else: + insert_result = self.collection.insert_one(document) + return insert_result.inserted_id + + def load_entry(self, vector_id: str, *, namespace: Optional[str] = None) -> Optional[BaseVectorStoreDriver.Entry]: + """Load a single vector entry from the Astra DB store given its ID. + + Args: + vector_id: the ID of the required vector. + namespace: a namespace, within the vector store, to constrain the search. + + Returns: + The vector entry (a `BaseVectorStoreDriver.Entry`) if found, otherwise None. + """ + find_filter = {k: v for k, v in {"_id": vector_id, "namespace": namespace}.items() if v is not None} + match = self.collection.find_one(filter=find_filter, projection={"*": 1}) + if match is not None: + return BaseVectorStoreDriver.Entry( + id=match["_id"], vector=match.get("$vector"), meta=match.get("meta"), namespace=match.get("namespace") + ) + else: + return None + + def load_entries(self, *, namespace: Optional[str] = None) -> list[BaseVectorStoreDriver.Entry]: + """Load entries from the Astra DB store. + + Args: + namespace: a namespace, within the vector store, to constrain the search. + + Returns: + A list of vector (`BaseVectorStoreDriver.Entry`) entries. + """ + find_filter: dict[str, str] = {} if namespace is None else {"namespace": namespace} + return [ + BaseVectorStoreDriver.Entry( + id=match["_id"], vector=match.get("$vector"), meta=match.get("meta"), namespace=match.get("namespace") + ) + for match in self.collection.find(filter=find_filter, projection={"*": 1}) + ] + + def query( + self, + query: str, + *, + count: Optional[int] = None, + namespace: Optional[str] = None, + include_vectors: bool = False, + **kwargs: Any, + ) -> list[BaseVectorStoreDriver.Entry]: + """Run a similarity search on the Astra DB store, based on a query string. + + Args: + query: the query string. + count: the maximum number of results to return. If omitted, defaults will apply. + namespace: the namespace to filter results by. + include_vectors: whether to include vector data in the results. + kwargs: additional keyword arguments. Currently only the free-form dict `filter` + is recognized (and goes straight to the Data API query); + others will generate a warning and be ignored. + + Returns: + A list of vector (`BaseVectorStoreDriver.Entry`) entries, + with their `score` attribute set to the vector similarity to the query. 
+ """ + query_filter: Optional[dict[str, Any]] = kwargs.get("filter") + find_filter_ns: dict[str, Any] = {} if namespace is None else {"namespace": namespace} + find_filter = {**(query_filter or {}), **find_filter_ns} + find_projection: Optional[dict[str, int]] = {"*": 1} if include_vectors else None + vector = self.embedding_driver.embed_string(query) + ann_limit = count or BaseVectorStoreDriver.DEFAULT_QUERY_COUNT + matches = self.collection.find( + filter=find_filter, + sort={"$vector": vector}, + limit=ann_limit, + projection=find_projection, + include_similarity=True, + ) + return [ + BaseVectorStoreDriver.Entry( + id=match["_id"], + vector=match.get("$vector"), + score=match["$similarity"], + meta=match.get("meta"), + namespace=match.get("namespace"), + ) + for match in matches + ] diff --git a/griptape/drivers/vector/base_vector_store_driver.py b/griptape/drivers/vector/base_vector_store_driver.py index d1da78188..2abb29c3f 100644 --- a/griptape/drivers/vector/base_vector_store_driver.py +++ b/griptape/drivers/vector/base_vector_store_driver.py @@ -2,22 +2,21 @@ import uuid from abc import ABC, abstractmethod -from concurrent import futures from dataclasses import dataclass -from typing import TYPE_CHECKING, Any, Callable, Optional +from typing import TYPE_CHECKING, Any, Optional -from attrs import Factory, define, field +from attrs import define, field from griptape import utils from griptape.artifacts import BaseArtifact, ListArtifact, TextArtifact -from griptape.mixins import EventPublisherMixin, SerializableMixin +from griptape.mixins import FuturesExecutorMixin, SerializableMixin if TYPE_CHECKING: from griptape.drivers import BaseEmbeddingDriver @define -class BaseVectorStoreDriver(EventPublisherMixin, SerializableMixin, ABC): +class BaseVectorStoreDriver(SerializableMixin, FuturesExecutorMixin, ABC): DEFAULT_QUERY_COUNT = 5 @dataclass @@ -36,10 +35,6 @@ def to_artifact(self) -> BaseArtifact: return BaseArtifact.from_json(self.meta["artifact"]) # pyright: ignore[reportOptionalSubscript] embedding_driver: BaseEmbeddingDriver = field(kw_only=True, metadata={"serializable": True}) - futures_executor_fn: Callable[[], futures.Executor] = field( - default=Factory(lambda: lambda: futures.ThreadPoolExecutor()), - kw_only=True, - ) def upsert_text_artifacts( self, @@ -48,24 +43,28 @@ def upsert_text_artifacts( meta: Optional[dict] = None, **kwargs, ) -> None: - with self.futures_executor_fn() as executor: - if isinstance(artifacts, list): - utils.execute_futures_list( - [ - executor.submit(self.upsert_text_artifact, a, namespace=None, meta=meta, **kwargs) - for a in artifacts - ], - ) - else: - utils.execute_futures_dict( - { - namespace: executor.submit( + if isinstance(artifacts, list): + utils.execute_futures_list( + [ + self.futures_executor.submit(self.upsert_text_artifact, a, namespace=None, meta=meta, **kwargs) + for a in artifacts + ], + ) + else: + futures_dict = {} + + for namespace, artifact_list in artifacts.items(): + for a in artifact_list: + if not futures_dict.get(namespace): + futures_dict[namespace] = [] + + futures_dict[namespace].append( + self.futures_executor.submit( self.upsert_text_artifact, a, namespace=namespace, meta=meta, **kwargs ) - for namespace, artifact_list in artifacts.items() - for a in artifact_list - }, - ) + ) + + utils.execute_futures_list_dict(futures_dict) def upsert_text_artifact( self, diff --git a/griptape/drivers/vector/griptape_cloud_knowledge_base_vector_store_driver.py 
b/griptape/drivers/vector/griptape_cloud_knowledge_base_vector_store_driver.py index 34b646846..a3bd6a011 100644 --- a/griptape/drivers/vector/griptape_cloud_knowledge_base_vector_store_driver.py +++ b/griptape/drivers/vector/griptape_cloud_knowledge_base_vector_store_driver.py @@ -105,8 +105,7 @@ def query( response = requests.post(url, json=request, headers=self.headers).json() entries = response.get("entries", []) - entry_list = [BaseVectorStoreDriver.Entry.from_dict(entry) for entry in entries] - return entry_list + return [BaseVectorStoreDriver.Entry.from_dict(entry) for entry in entries] def delete_vector(self, vector_id: str) -> NoReturn: raise NotImplementedError(f"{self.__class__.__name__} does not support deletion.") diff --git a/griptape/drivers/vector/local_vector_store_driver.py b/griptape/drivers/vector/local_vector_store_driver.py index 2b42b19f5..36203d540 100644 --- a/griptape/drivers/vector/local_vector_store_driver.py +++ b/griptape/drivers/vector/local_vector_store_driver.py @@ -31,24 +31,19 @@ def __attrs_post_init__(self) -> None: if not os.path.isfile(self.persist_file): with open(self.persist_file, "w") as file: - self.save_entries_to_file(file) + self.__save_entries_to_file(file) with open(self.persist_file, "r+") as file: if os.path.getsize(self.persist_file) > 0: self.entries = self.load_entries_from_file(file) else: - self.save_entries_to_file(file) - - def save_entries_to_file(self, json_file: TextIO) -> None: - with self.thread_lock: - serialized_data = {k: asdict(v) for k, v in self.entries.items()} - - json.dump(serialized_data, json_file) + self.__save_entries_to_file(file) def load_entries_from_file(self, json_file: TextIO) -> dict[str, BaseVectorStoreDriver.Entry]: - data = json.load(json_file) + with self.thread_lock: + data = json.load(json_file) - return {k: BaseVectorStoreDriver.Entry.from_dict(v) for k, v in data.items()} + return {k: BaseVectorStoreDriver.Entry.from_dict(v) for k, v in data.items()} def upsert_vector( self, @@ -62,7 +57,7 @@ def upsert_vector( vector_id = vector_id or utils.str_to_hash(str(vector)) with self.thread_lock: - self.entries[self._namespaced_vector_id(vector_id, namespace=namespace)] = self.Entry( + self.entries[self.__namespaced_vector_id(vector_id, namespace=namespace)] = self.Entry( id=vector_id, vector=vector, meta=meta, @@ -73,12 +68,12 @@ def upsert_vector( # TODO: optimize later since it reserializes all entries from memory and stores them in the JSON file # every time a new vector is inserted with open(self.persist_file, "w") as file: - self.save_entries_to_file(file) + self.__save_entries_to_file(file) return vector_id def load_entry(self, vector_id: str, *, namespace: Optional[str] = None) -> Optional[BaseVectorStoreDriver.Entry]: - return self.entries.get(self._namespaced_vector_id(vector_id, namespace=namespace), None) + return self.entries.get(self.__namespaced_vector_id(vector_id, namespace=namespace), None) def load_entries(self, *, namespace: Optional[str] = None) -> list[BaseVectorStoreDriver.Entry]: return [entry for key, entry in self.entries.items() if namespace is None or entry.namespace == namespace] @@ -100,8 +95,9 @@ def query( entries = self.entries entries_and_relatednesses = [ - (entry, self.relatedness_fn(query_embedding, entry.vector)) for entry in entries.values() + (entry, self.relatedness_fn(query_embedding, entry.vector)) for entry in list(entries.values()) ] + entries_and_relatednesses.sort(key=operator.itemgetter(1), reverse=True) result = [ @@ -120,5 +116,11 @@ def query( def 
delete_vector(self, vector_id: str) -> NoReturn: raise NotImplementedError(f"{self.__class__.__name__} does not support deletion.") - def _namespaced_vector_id(self, vector_id: str, *, namespace: Optional[str]) -> str: + def __save_entries_to_file(self, json_file: TextIO) -> None: + with self.thread_lock: + serialized_data = {k: asdict(v) for k, v in self.entries.items()} + + json.dump(serialized_data, json_file) + + def __namespaced_vector_id(self, vector_id: str, *, namespace: Optional[str]) -> str: return vector_id if namespace is None else f"{namespace}-{vector_id}" diff --git a/griptape/drivers/vector/mongodb_atlas_vector_store_driver.py b/griptape/drivers/vector/mongodb_atlas_vector_store_driver.py index 34b1d3a5e..bc3f1e22f 100644 --- a/griptape/drivers/vector/mongodb_atlas_vector_store_driver.py +++ b/griptape/drivers/vector/mongodb_atlas_vector_store_driver.py @@ -160,7 +160,7 @@ def query( if namespace: pipeline[0]["$vectorSearch"]["filter"] = {"namespace": namespace} - results = [ + return [ BaseVectorStoreDriver.Entry( id=str(doc["_id"]), vector=doc[self.vector_path] if include_vectors else [], @@ -171,8 +171,6 @@ def query( for doc in collection.aggregate(pipeline) ] - return results - def delete_vector(self, vector_id: str) -> None: """Deletes the vector from the collection.""" collection = self.get_collection() diff --git a/griptape/drivers/vector/opensearch_vector_store_driver.py b/griptape/drivers/vector/opensearch_vector_store_driver.py index 267b549b7..cf944116a 100644 --- a/griptape/drivers/vector/opensearch_vector_store_driver.py +++ b/griptape/drivers/vector/opensearch_vector_store_driver.py @@ -83,13 +83,12 @@ def load_entry(self, vector_id: str, *, namespace: Optional[str] = None) -> Opti if response["hits"]["total"]["value"] > 0: vector_data = response["hits"]["hits"][0]["_source"] - entry = BaseVectorStoreDriver.Entry( + return BaseVectorStoreDriver.Entry( id=vector_id, meta=vector_data.get("metadata"), vector=vector_data.get("vector"), namespace=vector_data.get("namespace"), ) - return entry else: return None except Exception as e: @@ -109,7 +108,7 @@ def load_entries(self, *, namespace: Optional[str] = None) -> list[BaseVectorSto response = self.client.search(index=self.index_name, body=query_body) - entries = [ + return [ BaseVectorStoreDriver.Entry( id=hit["_id"], vector=hit["_source"].get("vector"), @@ -118,7 +117,6 @@ def load_entries(self, *, namespace: Optional[str] = None) -> list[BaseVectorSto ) for hit in response["hits"]["hits"] ] - return entries def query( self, diff --git a/griptape/drivers/vector/qdrant_vector_store_driver.py b/griptape/drivers/vector/qdrant_vector_store_driver.py index c33b7eb2e..154e54af7 100644 --- a/griptape/drivers/vector/qdrant_vector_store_driver.py +++ b/griptape/drivers/vector/qdrant_vector_store_driver.py @@ -114,7 +114,7 @@ def query( results = self.client.search(**request) # Convert results to QueryResult objects - query_results = [ + return [ BaseVectorStoreDriver.Entry( id=result.id, vector=result.vector if include_vectors else [], @@ -123,7 +123,6 @@ def query( ) for result in results ] - return query_results def upsert_vector( self, diff --git a/griptape/drivers/web_scraper/markdownify_web_scraper_driver.py b/griptape/drivers/web_scraper/markdownify_web_scraper_driver.py index 556d5e06e..b54ff072f 100644 --- a/griptape/drivers/web_scraper/markdownify_web_scraper_driver.py +++ b/griptape/drivers/web_scraper/markdownify_web_scraper_driver.py @@ -60,6 +60,7 @@ def skip_loading_images(route: Any) -> Any: if 
route.request.resource_type == "image": return route.abort() route.continue_() + return None page.route("**/*", skip_loading_images) diff --git a/griptape/drivers/web_scraper/trafilatura_web_scraper_driver.py b/griptape/drivers/web_scraper/trafilatura_web_scraper_driver.py index 0763155d5..06f5573a4 100644 --- a/griptape/drivers/web_scraper/trafilatura_web_scraper_driver.py +++ b/griptape/drivers/web_scraper/trafilatura_web_scraper_driver.py @@ -29,13 +29,12 @@ def scrape_url(self, url: str) -> TextArtifact: if page is None: raise Exception("can't access URL") - else: - extracted_page = trafilatura.extract( - page, - include_links=self.include_links, - output_format="json", - config=config, - ) + extracted_page = trafilatura.extract( + page, + include_links=self.include_links, + output_format="json", + config=config, + ) if not extracted_page: raise Exception("can't extract page") diff --git a/griptape/drivers/web_search/duck_duck_go_web_search_driver.py b/griptape/drivers/web_search/duck_duck_go_web_search_driver.py index e701e8e0c..b67e81f35 100644 --- a/griptape/drivers/web_search/duck_duck_go_web_search_driver.py +++ b/griptape/drivers/web_search/duck_duck_go_web_search_driver.py @@ -19,7 +19,9 @@ class DuckDuckGoWebSearchDriver(BaseWebSearchDriver): def search(self, query: str, **kwargs) -> ListArtifact: try: - results = self.client.text(query, region=f"{self.language}-{self.country}", max_results=self.results_count) + results = self.client.text( + query, region=f"{self.language}-{self.country}", max_results=self.results_count, **kwargs + ) return ListArtifact( [ TextArtifact( diff --git a/griptape/drivers/web_search/google_web_search_driver.py b/griptape/drivers/web_search/google_web_search_driver.py index 26ac57342..012c52307 100644 --- a/griptape/drivers/web_search/google_web_search_driver.py +++ b/griptape/drivers/web_search/google_web_search_driver.py @@ -11,31 +11,30 @@ @define class GoogleWebSearchDriver(BaseWebSearchDriver): - api_key: str = field(default=None, kw_only=True) - search_id: str = field(default=None, kw_only=True) + api_key: str = field(kw_only=True) + search_id: str = field(kw_only=True) def search(self, query: str, **kwargs) -> ListArtifact: return ListArtifact([TextArtifact(json.dumps(result)) for result in self._search_google(query, **kwargs)]) def _search_google(self, query: str, **kwargs) -> list[dict]: - url = ( - f"https://www.googleapis.com/customsearch/v1?" 
- f"key={self.api_key}&" - f"cx={self.search_id}&" - f"q={query}&" - f"start=0&" - f"lr=lang_{self.language}&" - f"num={self.results_count}&" - f"gl={self.country}" - ) - response = requests.get(url) + query_params = { + "key": self.api_key, + "cx": self.search_id, + "q": query, + "start": 0, + "lr": f"lang_{self.language}", + "num": self.results_count, + "gl": self.country, + **kwargs, + } + response = requests.get("https://www.googleapis.com/customsearch/v1", params=query_params) if response.status_code == 200: data = response.json() - links = [{"url": r["link"], "title": r["title"], "description": r["snippet"]} for r in data["items"]] + return [{"url": r["link"], "title": r["title"], "description": r["snippet"]} for r in data["items"]] - return links else: raise Exception( f"Google Search API returned an error with status code " diff --git a/griptape/engines/audio/audio_transcription_engine.py b/griptape/engines/audio/audio_transcription_engine.py index 3631b2d17..4084c8829 100644 --- a/griptape/engines/audio/audio_transcription_engine.py +++ b/griptape/engines/audio/audio_transcription_engine.py @@ -1,12 +1,15 @@ -from attrs import define, field +from attrs import Factory, define, field from griptape.artifacts import AudioArtifact, TextArtifact +from griptape.configs import Defaults from griptape.drivers import BaseAudioTranscriptionDriver @define class AudioTranscriptionEngine: - audio_transcription_driver: BaseAudioTranscriptionDriver = field(kw_only=True) + audio_transcription_driver: BaseAudioTranscriptionDriver = field( + default=Factory(lambda: Defaults.drivers_config.audio_transcription_driver), kw_only=True + ) def run(self, audio: AudioArtifact, *args, **kwargs) -> TextArtifact: return self.audio_transcription_driver.try_run(audio) diff --git a/griptape/engines/audio/text_to_speech_engine.py b/griptape/engines/audio/text_to_speech_engine.py index af5d5a494..1261ae369 100644 --- a/griptape/engines/audio/text_to_speech_engine.py +++ b/griptape/engines/audio/text_to_speech_engine.py @@ -2,7 +2,9 @@ from typing import TYPE_CHECKING -from attrs import define, field +from attrs import Factory, define, field + +from griptape.configs import Defaults if TYPE_CHECKING: from griptape.artifacts.audio_artifact import AudioArtifact @@ -11,7 +13,9 @@ @define class TextToSpeechEngine: - text_to_speech_driver: BaseTextToSpeechDriver = field(kw_only=True) + text_to_speech_driver: BaseTextToSpeechDriver = field( + default=Factory(lambda: Defaults.drivers_config.text_to_speech_driver), kw_only=True + ) def run(self, prompts: list[str], *args, **kwargs) -> AudioArtifact: return self.text_to_speech_driver.try_text_to_audio(prompts=prompts) diff --git a/griptape/engines/extraction/base_extraction_engine.py b/griptape/engines/extraction/base_extraction_engine.py index f263ee0aa..fb1fab6c4 100644 --- a/griptape/engines/extraction/base_extraction_engine.py +++ b/griptape/engines/extraction/base_extraction_engine.py @@ -6,6 +6,7 @@ from attrs import Attribute, Factory, define, field from griptape.chunkers import BaseChunker, TextChunker +from griptape.configs import Defaults if TYPE_CHECKING: from griptape.artifacts import ErrorArtifact, ListArtifact @@ -17,7 +18,9 @@ class BaseExtractionEngine(ABC): max_token_multiplier: float = field(default=0.5, kw_only=True) chunk_joiner: str = field(default="\n\n", kw_only=True) - prompt_driver: BasePromptDriver = field(kw_only=True) + prompt_driver: BasePromptDriver = field( + default=Factory(lambda: Defaults.drivers_config.prompt_driver), kw_only=True + ) 
chunker: BaseChunker = field( default=Factory( lambda self: TextChunker(tokenizer=self.prompt_driver.tokenizer, max_tokens=self.max_chunker_tokens), @@ -30,7 +33,7 @@ class BaseExtractionEngine(ABC): def validate_max_token_multiplier(self, _: Attribute, max_token_multiplier: int) -> None: if max_token_multiplier > 1: raise ValueError("has to be less than or equal to 1") - elif max_token_multiplier <= 0: + if max_token_multiplier <= 0: raise ValueError("has to be greater than 0") @property diff --git a/griptape/engines/extraction/csv_extraction_engine.py b/griptape/engines/extraction/csv_extraction_engine.py index 3184654b1..c9c040f65 100644 --- a/griptape/engines/extraction/csv_extraction_engine.py +++ b/griptape/engines/extraction/csv_extraction_engine.py @@ -7,8 +7,7 @@ from attrs import Factory, define, field from griptape.artifacts import CsvRowArtifact, ErrorArtifact, ListArtifact, TextArtifact -from griptape.common import PromptStack -from griptape.common.prompt_stack.messages.message import Message +from griptape.common import Message, PromptStack from griptape.engines import BaseExtractionEngine from griptape.utils import J2 @@ -18,25 +17,22 @@ @define class CsvExtractionEngine(BaseExtractionEngine): - template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/csv_extraction.j2")), kw_only=True) + column_names: list[str] = field(default=Factory(list), kw_only=True) + system_template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/csv/system.j2")), kw_only=True) + user_template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/csv/user.j2")), kw_only=True) def extract( self, text: str | ListArtifact, *, rulesets: Optional[list[Ruleset]] = None, - column_names: Optional[list[str]] = None, **kwargs, ) -> ListArtifact | ErrorArtifact: - if column_names is None: - column_names = [] try: return ListArtifact( self._extract_rec( cast(list[TextArtifact], text.value) if isinstance(text, ListArtifact) else [TextArtifact(text)], - column_names, [], - rulesets=rulesets, ), item_separator="\n", ) @@ -55,39 +51,56 @@ def text_to_csv_rows(self, text: str, column_names: list[str]) -> list[CsvRowArt def _extract_rec( self, artifacts: list[TextArtifact], - column_names: list[str], rows: list[CsvRowArtifact], + *, rulesets: Optional[list[Ruleset]] = None, ) -> list[CsvRowArtifact]: artifacts_text = self.chunk_joiner.join([a.value for a in artifacts]) - full_text = self.template_generator.render( - column_names=column_names, - text=artifacts_text, + system_prompt = self.system_template_generator.render( + column_names=self.column_names, rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets), ) + user_prompt = self.user_template_generator.render( + text=artifacts_text, + ) - if self.prompt_driver.tokenizer.count_input_tokens_left(full_text) >= self.min_response_tokens: + if ( + self.prompt_driver.tokenizer.count_input_tokens_left(system_prompt + user_prompt) + >= self.min_response_tokens + ): rows.extend( self.text_to_csv_rows( - self.prompt_driver.run(PromptStack(messages=[Message(full_text, role=Message.USER_ROLE)])).value, - column_names, + self.prompt_driver.run( + PromptStack( + messages=[ + Message(system_prompt, role=Message.SYSTEM_ROLE), + Message(user_prompt, role=Message.USER_ROLE), + ] + ) + ).value, + self.column_names, ), ) return rows else: chunks = self.chunker.chunk(artifacts_text) - partial_text = self.template_generator.render( - column_names=column_names, + partial_text = self.user_template_generator.render( 
text=chunks[0].value, - rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets), ) rows.extend( self.text_to_csv_rows( - self.prompt_driver.run(PromptStack(messages=[Message(partial_text, role=Message.USER_ROLE)])).value, - column_names, + self.prompt_driver.run( + PromptStack( + messages=[ + Message(system_prompt, role=Message.SYSTEM_ROLE), + Message(partial_text, role=Message.USER_ROLE), + ] + ) + ).value, + self.column_names, ), ) - return self._extract_rec(chunks[1:], column_names, rows, rulesets=rulesets) + return self._extract_rec(chunks[1:], rows, rulesets=rulesets) diff --git a/griptape/engines/extraction/json_extraction_engine.py b/griptape/engines/extraction/json_extraction_engine.py index 436fc093f..8f2f4a3fe 100644 --- a/griptape/engines/extraction/json_extraction_engine.py +++ b/griptape/engines/extraction/json_extraction_engine.py @@ -1,6 +1,7 @@ from __future__ import annotations import json +import re from typing import TYPE_CHECKING, Optional, cast from attrs import Factory, define, field @@ -17,25 +18,25 @@ @define class JsonExtractionEngine(BaseExtractionEngine): - template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/json_extraction.j2")), kw_only=True) + JSON_PATTERN = r"(?s)[^\[]*(\[.*\])" + + template_schema: dict = field(default=Factory(dict), kw_only=True) + system_template_generator: J2 = field( + default=Factory(lambda: J2("engines/extraction/json/system.j2")), kw_only=True + ) + user_template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/json/user.j2")), kw_only=True) def extract( self, text: str | ListArtifact, *, rulesets: Optional[list[Ruleset]] = None, - template_schema: Optional[list[dict]] = None, **kwargs, ) -> ListArtifact | ErrorArtifact: - if template_schema is None: - template_schema = [] try: - json_schema = json.dumps(template_schema) - return ListArtifact( self._extract_rec( cast(list[TextArtifact], text.value) if isinstance(text, ListArtifact) else [TextArtifact(text)], - json_schema, [], rulesets=rulesets, ), @@ -45,42 +46,64 @@ def extract( return ErrorArtifact(f"error extracting JSON: {e}") def json_to_text_artifacts(self, json_input: str) -> list[TextArtifact]: - return [TextArtifact(json.dumps(e)) for e in json.loads(json_input)] + json_matches = re.findall(self.JSON_PATTERN, json_input, re.DOTALL) + + if json_matches: + return [TextArtifact(json.dumps(e)) for e in json.loads(json_matches[-1])] + else: + return [] def _extract_rec( self, artifacts: list[TextArtifact], - json_template_schema: str, extractions: list[TextArtifact], + *, rulesets: Optional[list[Ruleset]] = None, ) -> list[TextArtifact]: artifacts_text = self.chunk_joiner.join([a.value for a in artifacts]) - full_text = self.template_generator.render( - json_template_schema=json_template_schema, - text=artifacts_text, + system_prompt = self.system_template_generator.render( + json_template_schema=json.dumps(self.template_schema), rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets), ) + user_prompt = self.user_template_generator.render( + text=artifacts_text, + ) - if self.prompt_driver.tokenizer.count_input_tokens_left(full_text) >= self.min_response_tokens: + if ( + self.prompt_driver.tokenizer.count_input_tokens_left(user_prompt + system_prompt) + >= self.min_response_tokens + ): extractions.extend( self.json_to_text_artifacts( - self.prompt_driver.run(PromptStack(messages=[Message(full_text, role=Message.USER_ROLE)])).value, + self.prompt_driver.run( + PromptStack( + messages=[ + Message(system_prompt, 
role=Message.SYSTEM_ROLE), + Message(user_prompt, role=Message.USER_ROLE), + ] + ) + ).value ), ) return extractions else: chunks = self.chunker.chunk(artifacts_text) - partial_text = self.template_generator.render( - template_schema=json_template_schema, + partial_text = self.user_template_generator.render( text=chunks[0].value, - rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets), ) extractions.extend( self.json_to_text_artifacts( - self.prompt_driver.run(PromptStack(messages=[Message(partial_text, role=Message.USER_ROLE)])).value, + self.prompt_driver.run( + PromptStack( + messages=[ + Message(system_prompt, role=Message.SYSTEM_ROLE), + Message(partial_text, role=Message.USER_ROLE), + ] + ) + ).value, ), ) - return self._extract_rec(chunks[1:], json_template_schema, extractions, rulesets=rulesets) + return self._extract_rec(chunks[1:], extractions, rulesets=rulesets) diff --git a/griptape/engines/image/base_image_generation_engine.py b/griptape/engines/image/base_image_generation_engine.py index 47a853871..5fdc60531 100644 --- a/griptape/engines/image/base_image_generation_engine.py +++ b/griptape/engines/image/base_image_generation_engine.py @@ -3,7 +3,9 @@ from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Optional -from attrs import define, field +from attrs import Factory, define, field + +from griptape.configs import Defaults if TYPE_CHECKING: from griptape.artifacts import ImageArtifact @@ -13,7 +15,9 @@ @define class BaseImageGenerationEngine(ABC): - image_generation_driver: BaseImageGenerationDriver = field(kw_only=True) + image_generation_driver: BaseImageGenerationDriver = field( + kw_only=True, default=Factory(lambda: Defaults.drivers_config.image_generation_driver) + ) @abstractmethod def run(self, prompts: list[str], *args, rulesets: Optional[list[Ruleset]], **kwargs) -> ImageArtifact: ... 
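A minimal usage sketch (not part of the patch) of the Defaults-based construction these hunks enable: several engines here (audio transcription, text-to-speech, extraction, image generation, and, below, image query and prompt summary) gain Factory defaults that pull their drivers from Defaults.drivers_config, assuming the active drivers config provides the corresponding driver:

    from griptape.engines import TextToSpeechEngine

    engine = TextToSpeechEngine()  # no driver passed; resolves Defaults.drivers_config.text_to_speech_driver
    audio = engine.run(["Hello from the default drivers config"])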
diff --git a/griptape/engines/image_query/image_query_engine.py b/griptape/engines/image_query/image_query_engine.py index d0a1e99d4..348017e64 100644 --- a/griptape/engines/image_query/image_query_engine.py +++ b/griptape/engines/image_query/image_query_engine.py @@ -2,7 +2,9 @@ from typing import TYPE_CHECKING -from attrs import define, field +from attrs import Factory, define, field + +from griptape.configs import Defaults if TYPE_CHECKING: from griptape.artifacts import ImageArtifact, TextArtifact @@ -11,7 +13,9 @@ @define class ImageQueryEngine: - image_query_driver: BaseImageQueryDriver = field(kw_only=True) + image_query_driver: BaseImageQueryDriver = field( + default=Factory(lambda: Defaults.drivers_config.image_query_driver), kw_only=True + ) def run(self, query: str, images: list[ImageArtifact]) -> TextArtifact: return self.image_query_driver.query(query, images) diff --git a/griptape/engines/rag/modules/__init__.py b/griptape/engines/rag/modules/__init__.py index 4a180490d..ace9f4a3b 100644 --- a/griptape/engines/rag/modules/__init__.py +++ b/griptape/engines/rag/modules/__init__.py @@ -1,5 +1,6 @@ from .base_rag_module import BaseRagModule from .query.base_query_rag_module import BaseQueryRagModule +from .query.translate_query_rag_module import TranslateQueryRagModule from .retrieval.base_retrieval_rag_module import BaseRetrievalRagModule from .retrieval.base_rerank_rag_module import BaseRerankRagModule from .retrieval.text_chunks_rerank_rag_module import TextChunksRerankRagModule @@ -9,14 +10,13 @@ from .response.base_after_response_rag_module import BaseAfterResponseRagModule from .response.base_response_rag_module import BaseResponseRagModule from .response.prompt_response_rag_module import PromptResponseRagModule -from .response.rulesets_before_response_rag_module import RulesetsBeforeResponseRagModule -from .response.metadata_before_response_rag_module import MetadataBeforeResponseRagModule from .response.text_chunks_response_rag_module import TextChunksResponseRagModule from .response.footnote_prompt_response_rag_module import FootnotePromptResponseRagModule __all__ = [ "BaseRagModule", "BaseQueryRagModule", + "TranslateQueryRagModule", "BaseRetrievalRagModule", "BaseRerankRagModule", "TextChunksRerankRagModule", @@ -26,8 +26,6 @@ "BaseAfterResponseRagModule", "BaseResponseRagModule", "PromptResponseRagModule", - "RulesetsBeforeResponseRagModule", - "MetadataBeforeResponseRagModule", "TextChunksResponseRagModule", "FootnotePromptResponseRagModule", ] diff --git a/griptape/engines/rag/modules/base_rag_module.py b/griptape/engines/rag/modules/base_rag_module.py index f2c8316a8..668b3aced 100644 --- a/griptape/engines/rag/modules/base_rag_module.py +++ b/griptape/engines/rag/modules/base_rag_module.py @@ -1,28 +1,33 @@ from __future__ import annotations +import uuid from abc import ABC -from concurrent import futures -from typing import TYPE_CHECKING, Any, Callable, Optional +from typing import TYPE_CHECKING, Any, Optional from attrs import Factory, define, field from griptape.common import Message, PromptStack +from griptape.mixins import FuturesExecutorMixin if TYPE_CHECKING: from griptape.engines.rag import RagContext @define(kw_only=True) -class BaseRagModule(ABC): - name: str = field(default=Factory(lambda self: self.__class__.__name__, takes_self=True), kw_only=True) - futures_executor_fn: Callable[[], futures.Executor] = field( - default=Factory(lambda: lambda: futures.ThreadPoolExecutor()), +class BaseRagModule(FuturesExecutorMixin, ABC): + name: str = field( + 
default=Factory(lambda self: f"{self.__class__.__name__}-{uuid.uuid4().hex}", takes_self=True), kw_only=True ) - def generate_query_prompt_stack(self, system_prompt: str, query: str) -> PromptStack: - return PromptStack( - messages=[Message(system_prompt, role=Message.SYSTEM_ROLE), Message(query, role=Message.USER_ROLE)], - ) + def generate_prompt_stack(self, system_prompt: Optional[str], query: str) -> PromptStack: + messages = [] + + if system_prompt is not None: + messages.append(Message(system_prompt, role=Message.SYSTEM_ROLE)) + + messages.append(Message(query, role=Message.USER_ROLE)) + + return PromptStack(messages=messages) def get_context_param(self, context: RagContext, key: str) -> Optional[Any]: return context.module_configs.get(self.name, {}).get(key) diff --git a/griptape/engines/rag/modules/query/translate_query_rag_module.py b/griptape/engines/rag/modules/query/translate_query_rag_module.py new file mode 100644 index 000000000..f1f9ca0ec --- /dev/null +++ b/griptape/engines/rag/modules/query/translate_query_rag_module.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Callable + +from attrs import Factory, define, field + +from griptape.engines.rag.modules import BaseQueryRagModule +from griptape.utils import J2 + +if TYPE_CHECKING: + from griptape.drivers import BasePromptDriver + from griptape.engines.rag import RagContext + + +@define(kw_only=True) +class TranslateQueryRagModule(BaseQueryRagModule): + prompt_driver: BasePromptDriver = field() + language: str = field() + generate_user_template: Callable[[str, str], str] = field( + default=Factory(lambda self: self.default_user_template_generator, takes_self=True), + ) + + def run(self, context: RagContext) -> RagContext: + user_prompt = self.generate_user_template(context.query, self.language) + output = self.prompt_driver.run(self.generate_prompt_stack(None, user_prompt)).to_artifact() + + context.query = output.to_text() + + return context + + def default_user_template_generator(self, query: str, language: str) -> str: + return J2("engines/rag/modules/query/translate/user.j2").render(query=query, language=language) diff --git a/griptape/engines/rag/modules/response/base_response_rag_module.py b/griptape/engines/rag/modules/response/base_response_rag_module.py index 30ab82201..1bd3ddeb7 100644 --- a/griptape/engines/rag/modules/response/base_response_rag_module.py +++ b/griptape/engines/rag/modules/response/base_response_rag_module.py @@ -2,6 +2,7 @@ from attrs import define +from griptape.artifacts import BaseArtifact from griptape.engines.rag import RagContext from griptape.engines.rag.modules import BaseRagModule @@ -9,4 +10,4 @@ @define(kw_only=True) class BaseResponseRagModule(BaseRagModule, ABC): @abstractmethod - def run(self, context: RagContext) -> RagContext: ... + def run(self, context: RagContext) -> BaseArtifact: ... 
diff --git a/griptape/engines/rag/modules/response/metadata_before_response_rag_module.py b/griptape/engines/rag/modules/response/metadata_before_response_rag_module.py deleted file mode 100644 index d2d546213..000000000 --- a/griptape/engines/rag/modules/response/metadata_before_response_rag_module.py +++ /dev/null @@ -1,25 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, Optional - -from attrs import define, field - -from griptape.engines.rag.modules import BaseBeforeResponseRagModule -from griptape.utils import J2 - -if TYPE_CHECKING: - from griptape.engines.rag import RagContext - - -@define(kw_only=True) -class MetadataBeforeResponseRagModule(BaseBeforeResponseRagModule): - metadata: Optional[str] = field(default=None) - - def run(self, context: RagContext) -> RagContext: - context_metadata = self.get_context_param(context, "metadata") - metadata = self.metadata if context_metadata is None else context_metadata - - if metadata is not None: - context.before_query.append(J2("engines/rag/modules/response/metadata/system.j2").render(metadata=metadata)) - - return context diff --git a/griptape/engines/rag/modules/response/prompt_response_rag_module.py b/griptape/engines/rag/modules/response/prompt_response_rag_module.py index 0b7cbd953..78dfba8f4 100644 --- a/griptape/engines/rag/modules/response/prompt_response_rag_module.py +++ b/griptape/engines/rag/modules/response/prompt_response_rag_module.py @@ -1,27 +1,31 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING, Any, Callable, Optional from attrs import Factory, define, field from griptape.artifacts.text_artifact import TextArtifact +from griptape.configs import Defaults from griptape.engines.rag.modules import BaseResponseRagModule +from griptape.mixins import RuleMixin from griptape.utils import J2 if TYPE_CHECKING: + from griptape.artifacts import BaseArtifact from griptape.drivers import BasePromptDriver from griptape.engines.rag import RagContext @define(kw_only=True) -class PromptResponseRagModule(BaseResponseRagModule): +class PromptResponseRagModule(BaseResponseRagModule, RuleMixin): + prompt_driver: BasePromptDriver = field(default=Factory(lambda: Defaults.drivers_config.prompt_driver)) answer_token_offset: int = field(default=400) - prompt_driver: BasePromptDriver = field() + metadata: Optional[str] = field(default=None) generate_system_template: Callable[[RagContext, list[TextArtifact]], str] = field( default=Factory(lambda self: self.default_system_template_generator, takes_self=True), ) - def run(self, context: RagContext) -> RagContext: + def run(self, context: RagContext) -> BaseArtifact: query = context.query tokenizer = self.prompt_driver.tokenizer included_chunks = [] @@ -32,7 +36,7 @@ def run(self, context: RagContext) -> RagContext: system_prompt = self.generate_system_template(context, included_chunks) message_token_count = self.prompt_driver.tokenizer.count_tokens( - self.prompt_driver.prompt_stack_to_string(self.generate_query_prompt_stack(system_prompt, query)), + self.prompt_driver.prompt_stack_to_string(self.generate_prompt_stack(system_prompt, query)), ) if message_token_count + self.answer_token_offset >= tokenizer.max_input_tokens: @@ -42,18 +46,20 @@ def run(self, context: RagContext) -> RagContext: break - output = self.prompt_driver.run(self.generate_query_prompt_stack(system_prompt, query)).to_artifact() + output = self.prompt_driver.run(self.generate_prompt_stack(system_prompt, query)).to_artifact() if 
isinstance(output, TextArtifact): - context.output = output + return output else: raise ValueError("Prompt driver did not return a TextArtifact") - return context - def default_system_template_generator(self, context: RagContext, artifacts: list[TextArtifact]) -> str: - return J2("engines/rag/modules/response/prompt/system.j2").render( - text_chunks=[c.to_text() for c in artifacts], - before_system_prompt="\n\n".join(context.before_query), - after_system_prompt="\n\n".join(context.after_query), - ) + params: dict[str, Any] = {"text_chunks": [c.to_text() for c in artifacts]} + + if len(self.all_rulesets) > 0: + params["rulesets"] = J2("rulesets/rulesets.j2").render(rulesets=self.all_rulesets) + + if self.metadata is not None: + params["metadata"] = J2("engines/rag/modules/response/metadata/system.j2").render(metadata=self.metadata) + + return J2("engines/rag/modules/response/prompt/system.j2").render(**params) diff --git a/griptape/engines/rag/modules/response/rulesets_before_response_rag_module.py b/griptape/engines/rag/modules/response/rulesets_before_response_rag_module.py deleted file mode 100644 index 81b8410ce..000000000 --- a/griptape/engines/rag/modules/response/rulesets_before_response_rag_module.py +++ /dev/null @@ -1,22 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING - -from attrs import define, field - -from griptape.engines.rag.modules import BaseBeforeResponseRagModule -from griptape.utils import J2 - -if TYPE_CHECKING: - from griptape.engines.rag import RagContext - from griptape.rules import Ruleset - - -@define -class RulesetsBeforeResponseRagModule(BaseBeforeResponseRagModule): - rulesets: list[Ruleset] = field(kw_only=True) - - def run(self, context: RagContext) -> RagContext: - context.before_query.append(J2("rulesets/rulesets.j2").render(rulesets=self.rulesets)) - - return context diff --git a/griptape/engines/rag/modules/response/text_chunks_response_rag_module.py b/griptape/engines/rag/modules/response/text_chunks_response_rag_module.py index fd57b3905..35da0592b 100644 --- a/griptape/engines/rag/modules/response/text_chunks_response_rag_module.py +++ b/griptape/engines/rag/modules/response/text_chunks_response_rag_module.py @@ -1,13 +1,11 @@ from attrs import define -from griptape.artifacts import ListArtifact +from griptape.artifacts import BaseArtifact, ListArtifact from griptape.engines.rag import RagContext from griptape.engines.rag.modules import BaseResponseRagModule @define(kw_only=True) class TextChunksResponseRagModule(BaseResponseRagModule): - def run(self, context: RagContext) -> RagContext: - context.output = ListArtifact(context.text_chunks) - - return context + def run(self, context: RagContext) -> BaseArtifact: + return ListArtifact(context.text_chunks) diff --git a/griptape/engines/rag/modules/retrieval/text_loader_retrieval_rag_module.py b/griptape/engines/rag/modules/retrieval/text_loader_retrieval_rag_module.py index b79668583..4f53cc5f9 100644 --- a/griptape/engines/rag/modules/retrieval/text_loader_retrieval_rag_module.py +++ b/griptape/engines/rag/modules/retrieval/text_loader_retrieval_rag_module.py @@ -40,7 +40,6 @@ def run(self, context: RagContext) -> Sequence[TextArtifact]: if isinstance(loader_output, ErrorArtifact): raise Exception(loader_output.to_text() if loader_output.exception is None else loader_output.exception) - else: - self.vector_store_driver.upsert_text_artifacts({namespace: loader_output}) + self.vector_store_driver.upsert_text_artifacts({namespace: loader_output}) - return 
self.process_query_output_fn(self.vector_store_driver.query(context.query, **query_params)) + return self.process_query_output_fn(self.vector_store_driver.query(context.query, **query_params)) diff --git a/griptape/engines/rag/modules/retrieval/vector_store_retrieval_rag_module.py b/griptape/engines/rag/modules/retrieval/vector_store_retrieval_rag_module.py index 0a07b4c50..ddff2549c 100644 --- a/griptape/engines/rag/modules/retrieval/vector_store_retrieval_rag_module.py +++ b/griptape/engines/rag/modules/retrieval/vector_store_retrieval_rag_module.py @@ -5,6 +5,7 @@ from attrs import Factory, define, field from griptape import utils +from griptape.configs import Defaults from griptape.engines.rag.modules import BaseRetrievalRagModule if TYPE_CHECKING: @@ -17,7 +18,9 @@ @define(kw_only=True) class VectorStoreRetrievalRagModule(BaseRetrievalRagModule): - vector_store_driver: BaseVectorStoreDriver = field() + vector_store_driver: BaseVectorStoreDriver = field( + default=Factory(lambda: Defaults.drivers_config.vector_store_driver) + ) query_params: dict[str, Any] = field(factory=dict) process_query_output_fn: Callable[[list[BaseVectorStoreDriver.Entry]], Sequence[TextArtifact]] = field( default=Factory(lambda: lambda es: [e.to_artifact() for e in es]), diff --git a/griptape/engines/rag/rag_context.py b/griptape/engines/rag/rag_context.py index 1b0496d72..3dbfc6834 100644 --- a/griptape/engines/rag/rag_context.py +++ b/griptape/engines/rag/rag_context.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from attrs import define, field @@ -18,11 +18,11 @@ class RagContext(SerializableMixin): Attributes: query: Query provided by the user. - module_configs: Dictionary of module configs. First key should be a module name and the second a dictionary of config parameters. - before_query: An optional list of strings to add before the query in generation modules. - after_query: An optional list of strings to add after the query in generation modules. - text_chunks: A list of text chunks to pass around from the retrieval stage to the generation stage. - output: Final output from the generation stage. + module_configs: Dictionary of module configs. First key should be a module name and the second a dictionary of configs parameters. + before_query: An optional list of strings to add before the query in response modules. + after_query: An optional list of strings to add after the query in response modules. + text_chunks: A list of text chunks to pass around from the retrieval stage to the response stage. + outputs: List of outputs from the response stage. 
""" query: str = field(metadata={"serializable": True}) @@ -30,7 +30,7 @@ class RagContext(SerializableMixin): before_query: list[str] = field(factory=list, metadata={"serializable": True}) after_query: list[str] = field(factory=list, metadata={"serializable": True}) text_chunks: list[TextArtifact] = field(factory=list, metadata={"serializable": True}) - output: Optional[BaseArtifact] = field(default=None, metadata={"serializable": True}) + outputs: list[BaseArtifact] = field(factory=list, metadata={"serializable": True}) def get_references(self) -> list[Reference]: return utils.references_from_artifacts(self.text_chunks) diff --git a/griptape/engines/rag/stages/base_rag_stage.py b/griptape/engines/rag/stages/base_rag_stage.py index 4f5a9bcd1..6a28551b4 100644 --- a/griptape/engines/rag/stages/base_rag_stage.py +++ b/griptape/engines/rag/stages/base_rag_stage.py @@ -1,20 +1,15 @@ from abc import ABC, abstractmethod from collections.abc import Sequence -from concurrent import futures -from typing import Callable -from attrs import Factory, define, field +from attrs import define from griptape.engines.rag import RagContext from griptape.engines.rag.modules import BaseRagModule +from griptape.mixins import FuturesExecutorMixin @define(kw_only=True) -class BaseRagStage(ABC): - futures_executor_fn: Callable[[], futures.Executor] = field( - default=Factory(lambda: lambda: futures.ThreadPoolExecutor()), - ) - +class BaseRagStage(FuturesExecutorMixin, ABC): @abstractmethod def run(self, context: RagContext) -> RagContext: ... diff --git a/griptape/engines/rag/stages/query_rag_stage.py b/griptape/engines/rag/stages/query_rag_stage.py index 7340e4aed..ebde3170c 100644 --- a/griptape/engines/rag/stages/query_rag_stage.py +++ b/griptape/engines/rag/stages/query_rag_stage.py @@ -5,7 +5,6 @@ from attrs import define, field -from griptape import utils from griptape.engines.rag.stages import BaseRagStage if TYPE_CHECKING: @@ -24,9 +23,8 @@ def modules(self) -> Sequence[BaseRagModule]: return self.query_modules def run(self, context: RagContext) -> RagContext: - logging.info("QueryStage: running %s query generation modules in parallel", len(self.query_modules)) + logging.info("QueryRagStage: running %s query generation modules sequentially", len(self.query_modules)) - with self.futures_executor_fn() as executor: - utils.execute_futures_list([executor.submit(r.run, context) for r in self.query_modules]) + [qm.run(context) for qm in self.query_modules] return context diff --git a/griptape/engines/rag/stages/response_rag_stage.py b/griptape/engines/rag/stages/response_rag_stage.py index b63b5bc21..de286317c 100644 --- a/griptape/engines/rag/stages/response_rag_stage.py +++ b/griptape/engines/rag/stages/response_rag_stage.py @@ -5,13 +5,12 @@ from attrs import define, field +from griptape import utils from griptape.engines.rag.stages import BaseRagStage if TYPE_CHECKING: from griptape.engines.rag import RagContext from griptape.engines.rag.modules import ( - BaseAfterResponseRagModule, - BaseBeforeResponseRagModule, BaseRagModule, BaseResponseRagModule, ) @@ -19,35 +18,23 @@ @define(kw_only=True) class ResponseRagStage(BaseRagStage): - before_response_modules: list[BaseBeforeResponseRagModule] = field(factory=list) - response_module: BaseResponseRagModule = field() - after_response_modules: list[BaseAfterResponseRagModule] = field(factory=list) + response_modules: list[BaseResponseRagModule] = field() @property def modules(self) -> list[BaseRagModule]: ms = [] - ms.extend(self.before_response_modules) - 
ms.extend(self.after_response_modules) - - if self.response_module is not None: - ms.append(self.response_module) + ms.extend(self.response_modules) return ms def run(self, context: RagContext) -> RagContext: - logging.info("GenerationStage: running %s before modules sequentially", len(self.before_response_modules)) - - for generator in self.before_response_modules: - context = generator.run(context) - - logging.info("GenerationStage: running generation module") - - context = self.response_module.run(context) + logging.info("ResponseRagStage: running %s response modules in parallel", len(self.response_modules)) - logging.info("GenerationStage: running %s after modules sequentially", len(self.after_response_modules)) + results = utils.execute_futures_list( + [self.futures_executor.submit(r.run, context) for r in self.response_modules] + ) - for generator in self.after_response_modules: - context = generator.run(context) + context.outputs = results return context diff --git a/griptape/engines/rag/stages/retrieval_rag_stage.py b/griptape/engines/rag/stages/retrieval_rag_stage.py index 50b84abfc..6ce9fb19f 100644 --- a/griptape/engines/rag/stages/retrieval_rag_stage.py +++ b/griptape/engines/rag/stages/retrieval_rag_stage.py @@ -33,10 +33,11 @@ def modules(self) -> list[BaseRagModule]: return ms def run(self, context: RagContext) -> RagContext: - logging.info("RetrievalStage: running %s retrieval modules in parallel", len(self.retrieval_modules)) + logging.info("RetrievalRagStage: running %s retrieval modules in parallel", len(self.retrieval_modules)) - with self.futures_executor_fn() as executor: - results = utils.execute_futures_list([executor.submit(r.run, context) for r in self.retrieval_modules]) + results = utils.execute_futures_list( + [self.futures_executor.submit(r.run, context) for r in self.retrieval_modules] + ) # flatten the list of lists results = list(itertools.chain.from_iterable(results)) @@ -47,7 +48,7 @@ def run(self, context: RagContext) -> RagContext: chunks_after_dedup = len(results) logging.info( - "RetrievalStage: deduplicated %s " "chunks (%s - %s)", + "RetrievalRagStage: deduplicated %s " "chunks (%s - %s)", chunks_before_dedup - chunks_after_dedup, chunks_before_dedup, chunks_after_dedup, @@ -56,7 +57,7 @@ def run(self, context: RagContext) -> RagContext: context.text_chunks = [a for a in results if isinstance(a, TextArtifact)] if self.rerank_module: - logging.info("RetrievalStage: running rerank module on %s chunks", chunks_after_dedup) + logging.info("RetrievalRagStage: running rerank module on %s chunks", chunks_after_dedup) context.text_chunks = [a for a in self.rerank_module.run(context) if isinstance(a, TextArtifact)] diff --git a/griptape/engines/summary/prompt_summary_engine.py b/griptape/engines/summary/prompt_summary_engine.py index c5d8e695d..99e133844 100644 --- a/griptape/engines/summary/prompt_summary_engine.py +++ b/griptape/engines/summary/prompt_summary_engine.py @@ -6,8 +6,8 @@ from griptape.artifacts import ListArtifact, TextArtifact from griptape.chunkers import BaseChunker, TextChunker -from griptape.common import PromptStack -from griptape.common.prompt_stack.messages.message import Message +from griptape.common import Message, PromptStack +from griptape.configs import Defaults from griptape.engines import BaseSummaryEngine from griptape.utils import J2 @@ -22,7 +22,9 @@ class PromptSummaryEngine(BaseSummaryEngine): max_token_multiplier: float = field(default=0.5, kw_only=True) system_template_generator: J2 = field(default=Factory(lambda: 
J2("engines/summary/system.j2")), kw_only=True) user_template_generator: J2 = field(default=Factory(lambda: J2("engines/summary/user.j2")), kw_only=True) - prompt_driver: BasePromptDriver = field(kw_only=True) + prompt_driver: BasePromptDriver = field( + default=Factory(lambda: Defaults.drivers_config.prompt_driver), kw_only=True + ) chunker: BaseChunker = field( default=Factory( lambda self: TextChunker(tokenizer=self.prompt_driver.tokenizer, max_tokens=self.max_chunker_tokens), @@ -35,7 +37,7 @@ class PromptSummaryEngine(BaseSummaryEngine): def validate_allowlist(self, _: Attribute, max_token_multiplier: int) -> None: if max_token_multiplier > 1: raise ValueError("has to be less than or equal to 1") - elif max_token_multiplier <= 0: + if max_token_multiplier <= 0: raise ValueError("has to be greater than 0") @property diff --git a/griptape/events/__init__.py b/griptape/events/__init__.py index 944a309eb..b3e2f3a79 100644 --- a/griptape/events/__init__.py +++ b/griptape/events/__init__.py @@ -22,6 +22,7 @@ from .base_audio_transcription_event import BaseAudioTranscriptionEvent from .start_audio_transcription_event import StartAudioTranscriptionEvent from .finish_audio_transcription_event import FinishAudioTranscriptionEvent +from .event_bus import EventBus __all__ = [ "BaseEvent", @@ -48,4 +49,5 @@ "BaseAudioTranscriptionEvent", "StartAudioTranscriptionEvent", "FinishAudioTranscriptionEvent", + "EventBus", ] diff --git a/griptape/mixins/event_publisher_mixin.py b/griptape/events/event_bus.py similarity index 56% rename from griptape/mixins/event_publisher_mixin.py rename to griptape/events/event_bus.py index 67a302ed6..3ddc325ff 100644 --- a/griptape/mixins/event_publisher_mixin.py +++ b/griptape/events/event_bus.py @@ -4,13 +4,19 @@ from attrs import define, field +from griptape.mixins.singleton_mixin import SingletonMixin + if TYPE_CHECKING: from griptape.events import BaseEvent, EventListener @define -class EventPublisherMixin: - event_listeners: list[EventListener] = field(factory=list, kw_only=True) +class _EventBus(SingletonMixin): + _event_listeners: list[EventListener] = field(factory=list, kw_only=True, alias="_event_listeners") + + @property + def event_listeners(self) -> list[EventListener]: + return self._event_listeners def add_event_listeners(self, event_listeners: list[EventListener]) -> list[EventListener]: return [self.add_event_listener(event_listener) for event_listener in event_listeners] @@ -20,15 +26,21 @@ def remove_event_listeners(self, event_listeners: list[EventListener]) -> None: self.remove_event_listener(event_listener) def add_event_listener(self, event_listener: EventListener) -> EventListener: - if event_listener not in self.event_listeners: - self.event_listeners.append(event_listener) + if event_listener not in self._event_listeners: + self._event_listeners.append(event_listener) return event_listener def remove_event_listener(self, event_listener: EventListener) -> None: - if event_listener in self.event_listeners: - self.event_listeners.remove(event_listener) + if event_listener in self._event_listeners: + self._event_listeners.remove(event_listener) def publish_event(self, event: BaseEvent, *, flush: bool = False) -> None: - for event_listener in self.event_listeners: + for event_listener in self._event_listeners: event_listener.publish_event(event, flush=flush) + + def clear_event_listeners(self) -> None: + self._event_listeners.clear() + + +EventBus = _EventBus() diff --git a/griptape/exceptions/dummy_exception.py 
b/griptape/exceptions/dummy_exception.py index 815cb245f..0020ce547 100644 --- a/griptape/exceptions/dummy_exception.py +++ b/griptape/exceptions/dummy_exception.py @@ -2,7 +2,7 @@ class DummyError(Exception): def __init__(self, dummy_class_name: str, dummy_method_name: str) -> None: message = ( f"You have attempted to use a {dummy_class_name}'s {dummy_method_name} method. " - "This likely originated from using a `StructureConfig` without providing a Driver required for this feature." + "This likely originated from using a `DriversConfig` without providing a Driver required for this feature." ) super().__init__(message) diff --git a/griptape/loaders/audio_loader.py b/griptape/loaders/audio_loader.py index 532662e79..84d6b767a 100644 --- a/griptape/loaders/audio_loader.py +++ b/griptape/loaders/audio_loader.py @@ -14,9 +14,7 @@ class AudioLoader(BaseLoader): """Loads audio content into audio artifacts.""" def load(self, source: bytes, *args, **kwargs) -> AudioArtifact: - audio_artifact = AudioArtifact(source, format=import_optional_dependency("filetype").guess(source).extension) - - return audio_artifact + return AudioArtifact(source, format=import_optional_dependency("filetype").guess(source).extension) def load_collection(self, sources: list[bytes], *args, **kwargs) -> dict[str, AudioArtifact]: return cast(dict[str, AudioArtifact], super().load_collection(sources, *args, **kwargs)) diff --git a/griptape/loaders/base_loader.py b/griptape/loaders/base_loader.py index 09551d9ab..525b4df0a 100644 --- a/griptape/loaders/base_loader.py +++ b/griptape/loaders/base_loader.py @@ -1,11 +1,11 @@ from __future__ import annotations from abc import ABC, abstractmethod -from concurrent import futures -from typing import TYPE_CHECKING, Any, Callable, Optional +from typing import TYPE_CHECKING, Any, Optional -from attrs import Factory, define, field +from attrs import define, field +from griptape.mixins import FuturesExecutorMixin from griptape.utils.futures import execute_futures_dict from griptape.utils.hash import bytes_to_hash, str_to_hash @@ -16,11 +16,7 @@ @define -class BaseLoader(ABC): - futures_executor_fn: Callable[[], futures.Executor] = field( - default=Factory(lambda: lambda: futures.ThreadPoolExecutor()), - kw_only=True, - ) +class BaseLoader(FuturesExecutorMixin, ABC): encoding: Optional[str] = field(default=None, kw_only=True) @abstractmethod @@ -36,10 +32,12 @@ def load_collection( # to avoid duplicate work. 
sources_by_key = {self.to_key(source): source for source in sources} - with self.futures_executor_fn() as executor: - return execute_futures_dict( - {key: executor.submit(self.load, source, *args, **kwargs) for key, source in sources_by_key.items()}, - ) + return execute_futures_dict( + { + key: self.futures_executor.submit(self.load, source, *args, **kwargs) + for key, source in sources_by_key.items() + }, + ) def to_key(self, source: Any, *args, **kwargs) -> str: if isinstance(source, bytes): diff --git a/griptape/loaders/image_loader.py b/griptape/loaders/image_loader.py index b7a277edb..83060dfa8 100644 --- a/griptape/loaders/image_loader.py +++ b/griptape/loaders/image_loader.py @@ -42,9 +42,7 @@ def load(self, source: bytes, *args, **kwargs) -> ImageArtifact: image = pil_image.open(byte_stream) source = byte_stream.getvalue() - image_artifact = ImageArtifact(source, format=image.format.lower(), width=image.width, height=image.height) - - return image_artifact + return ImageArtifact(source, format=image.format.lower(), width=image.width, height=image.height) def _get_mime_type(self, image_format: str | None) -> str: if image_format is None: diff --git a/griptape/memory/structure/base_conversation_memory.py b/griptape/memory/structure/base_conversation_memory.py index c3d3c501e..44c053dc4 100644 --- a/griptape/memory/structure/base_conversation_memory.py +++ b/griptape/memory/structure/base_conversation_memory.py @@ -3,9 +3,10 @@ from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Optional -from attrs import define, field +from attrs import Factory, define, field from griptape.common import PromptStack +from griptape.configs import Defaults from griptape.mixins import SerializableMixin if TYPE_CHECKING: @@ -16,7 +17,9 @@ @define class BaseConversationMemory(SerializableMixin, ABC): - driver: Optional[BaseConversationMemoryDriver] = field(default=None, kw_only=True) + driver: Optional[BaseConversationMemoryDriver] = field( + default=Factory(lambda: Defaults.drivers_config.conversation_memory_driver), kw_only=True + ) runs: list[Run] = field(factory=list, kw_only=True, metadata={"serializable": True}) structure: Structure = field(init=False) autoload: bool = field(default=True, kw_only=True) @@ -64,7 +67,7 @@ def add_to_prompt_stack(self, prompt_stack: PromptStack, index: Optional[int] = if self.autoprune and hasattr(self, "structure"): should_prune = True - prompt_driver = self.structure.config.prompt_driver + prompt_driver = Defaults.drivers_config.prompt_driver temp_stack = PromptStack() # Try to determine how many Conversation Memory runs we can diff --git a/griptape/memory/structure/summary_conversation_memory.py b/griptape/memory/structure/summary_conversation_memory.py index f29bbb767..055057d34 100644 --- a/griptape/memory/structure/summary_conversation_memory.py +++ b/griptape/memory/structure/summary_conversation_memory.py @@ -5,8 +5,8 @@ from attrs import Factory, define, field -from griptape.common import PromptStack -from griptape.common.prompt_stack.messages.message import Message +from griptape.common import Message, PromptStack +from griptape.configs import Defaults from griptape.memory.structure import ConversationMemory from griptape.utils import J2 @@ -18,7 +18,9 @@ @define class SummaryConversationMemory(ConversationMemory): offset: int = field(default=1, kw_only=True, metadata={"serializable": True}) - _prompt_driver: BasePromptDriver = field(kw_only=True, default=None, alias="prompt_driver") + prompt_driver: BasePromptDriver = field( + 
kw_only=True, default=Factory(lambda: Defaults.drivers_config.prompt_driver) + ) summary: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": True}) summary_index: int = field(default=0, kw_only=True, metadata={"serializable": True}) summary_template_generator: J2 = field(default=Factory(lambda: J2("memory/conversation/summary.j2")), kw_only=True) @@ -27,19 +29,6 @@ class SummaryConversationMemory(ConversationMemory): kw_only=True, ) - @property - def prompt_driver(self) -> BasePromptDriver: - if self._prompt_driver is None: - if self.structure is not None: - self._prompt_driver = self.structure.config.prompt_driver - else: - raise ValueError("Prompt Driver is not set.") - return self._prompt_driver - - @prompt_driver.setter - def prompt_driver(self, value: BasePromptDriver) -> None: - self._prompt_driver = value - def to_prompt_stack(self, last_n: Optional[int] = None) -> PromptStack: stack = PromptStack() if self.summary: diff --git a/griptape/memory/task/storage/base_artifact_storage.py b/griptape/memory/task/storage/base_artifact_storage.py index 866df19da..792f479bc 100644 --- a/griptape/memory/task/storage/base_artifact_storage.py +++ b/griptape/memory/task/storage/base_artifact_storage.py @@ -1,12 +1,12 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING from attrs import define if TYPE_CHECKING: - from griptape.artifacts import BaseArtifact, InfoArtifact, ListArtifact, TextArtifact + from griptape.artifacts import BaseArtifact, ListArtifact @define @@ -19,9 +19,3 @@ def load_artifacts(self, namespace: str) -> ListArtifact: ... @abstractmethod def can_store(self, artifact: BaseArtifact) -> bool: ... - - @abstractmethod - def summarize(self, namespace: str) -> TextArtifact | InfoArtifact: ... - - @abstractmethod - def query(self, namespace: str, query: str, metadata: Any = None) -> BaseArtifact: ... 
diff --git a/griptape/memory/task/storage/blob_artifact_storage.py b/griptape/memory/task/storage/blob_artifact_storage.py index 6199dc3a3..3f4309481 100644 --- a/griptape/memory/task/storage/blob_artifact_storage.py +++ b/griptape/memory/task/storage/blob_artifact_storage.py @@ -1,10 +1,8 @@ from __future__ import annotations -from typing import Any - from attrs import define, field -from griptape.artifacts import BaseArtifact, BlobArtifact, InfoArtifact, ListArtifact +from griptape.artifacts import BaseArtifact, BlobArtifact, ListArtifact from griptape.memory.task.storage import BaseArtifactStorage @@ -26,9 +24,3 @@ def store_artifact(self, namespace: str, artifact: BaseArtifact) -> None: def load_artifacts(self, namespace: str) -> ListArtifact: return ListArtifact(next((blobs for key, blobs in self.blobs.items() if key == namespace), [])) - - def summarize(self, namespace: str) -> InfoArtifact: - return InfoArtifact("can't summarize artifacts") - - def query(self, namespace: str, query: str, metadata: Any = None) -> BaseArtifact: - return InfoArtifact("can't query artifacts") diff --git a/griptape/memory/task/storage/text_artifact_storage.py b/griptape/memory/task/storage/text_artifact_storage.py index 8e66c5aba..1560702fb 100644 --- a/griptape/memory/task/storage/text_artifact_storage.py +++ b/griptape/memory/task/storage/text_artifact_storage.py @@ -1,31 +1,22 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING -from attrs import Attribute, define, field +from attrs import Factory, define, field -from griptape.artifacts import BaseArtifact, InfoArtifact, ListArtifact, TextArtifact -from griptape.engines.rag import RagContext, RagEngine +from griptape.artifacts import BaseArtifact, ListArtifact, TextArtifact +from griptape.configs import Defaults from griptape.memory.task.storage import BaseArtifactStorage if TYPE_CHECKING: from griptape.drivers import BaseVectorStoreDriver - from griptape.engines import BaseSummaryEngine, CsvExtractionEngine, JsonExtractionEngine @define(kw_only=True) class TextArtifactStorage(BaseArtifactStorage): - vector_store_driver: BaseVectorStoreDriver = field() - rag_engine: Optional[RagEngine] = field(default=None) - retrieval_rag_module_name: Optional[str] = field(default=None) - summary_engine: Optional[BaseSummaryEngine] = field(default=None) - csv_extraction_engine: Optional[CsvExtractionEngine] = field(default=None) - json_extraction_engine: Optional[JsonExtractionEngine] = field(default=None) - - @rag_engine.validator # pyright: ignore[reportAttributeAccessIssue] - def validate_rag_engine(self, _: Attribute, rag_engine: str) -> None: - if rag_engine is not None and self.retrieval_rag_module_name is None: - raise ValueError("You have to set retrieval_rag_module_name if rag_engine is provided") + vector_store_driver: BaseVectorStoreDriver = field( + default=Factory(lambda: Defaults.drivers_config.vector_store_driver) + ) def can_store(self, artifact: BaseArtifact) -> bool: return isinstance(artifact, TextArtifact) @@ -38,35 +29,3 @@ def store_artifact(self, namespace: str, artifact: BaseArtifact) -> None: def load_artifacts(self, namespace: str) -> ListArtifact: return self.vector_store_driver.load_artifacts(namespace=namespace) - - def summarize(self, namespace: str) -> TextArtifact: - if self.summary_engine is None: - raise ValueError("Summary engine is not set.") - - return self.summary_engine.summarize_artifacts(self.load_artifacts(namespace)) - - def query(self, namespace: str, 
query: str, metadata: Any = None) -> BaseArtifact: - if self.rag_engine is None: - raise ValueError("rag_engine is not set") - - if self.retrieval_rag_module_name is None: - raise ValueError("retrieval_rag_module_name is not set") - - result = self.rag_engine.process( - RagContext( - query=query, - module_configs={ - self.retrieval_rag_module_name: { - "query_params": { - "namespace": namespace, - "metadata": None if metadata is None else str(metadata), - }, - }, - }, - ), - ).output - - if result is None: - return InfoArtifact("Empty output") - else: - return result diff --git a/griptape/memory/task/task_memory.py b/griptape/memory/task/task_memory.py index e2131d1f0..c7f12b233 100644 --- a/griptape/memory/task/task_memory.py +++ b/griptape/memory/task/task_memory.py @@ -4,8 +4,9 @@ from attrs import Attribute, Factory, define, field -from griptape.artifacts import BaseArtifact, ErrorArtifact, InfoArtifact, ListArtifact, TextArtifact +from griptape.artifacts import BaseArtifact, BlobArtifact, ErrorArtifact, InfoArtifact, ListArtifact, TextArtifact from griptape.memory.meta import ActionSubtaskMetaEntry +from griptape.memory.task.storage import BlobArtifactStorage, TextArtifactStorage from griptape.mixins import ActivityMixin if TYPE_CHECKING: @@ -16,7 +17,15 @@ @define class TaskMemory(ActivityMixin): name: str = field(default=Factory(lambda self: self.__class__.__name__, takes_self=True), kw_only=True) - artifact_storages: dict[type, BaseArtifactStorage] = field(factory=dict, kw_only=True) + artifact_storages: dict[type, BaseArtifactStorage] = field( + default=Factory( + lambda: { + TextArtifact: TextArtifactStorage(), + BlobArtifact: BlobArtifactStorage(), + } + ), + kw_only=True, + ) namespace_storage: dict[str, BaseArtifactStorage] = field(factory=dict, kw_only=True) namespace_metadata: dict[str, Any] = field(factory=dict, kw_only=True) @@ -123,19 +132,3 @@ def find_input_memory(self, memory_name: str) -> Optional[TaskMemory]: return self else: return None - - def summarize_namespace(self, namespace: str) -> TextArtifact | InfoArtifact: - storage = self.namespace_storage.get(namespace) - - if storage: - return storage.summarize(namespace) - else: - return InfoArtifact("Can't find memory content") - - def query_namespace(self, namespace: str, query: str) -> BaseArtifact: - storage = self.namespace_storage.get(namespace) - - if storage: - return storage.query(namespace=namespace, query=query, metadata=self.namespace_metadata.get(namespace)) - else: - return InfoArtifact("Can't find memory content") diff --git a/griptape/mixins/__init__.py b/griptape/mixins/__init__.py index 944027c59..32e00dd8b 100644 --- a/griptape/mixins/__init__.py +++ b/griptape/mixins/__init__.py @@ -4,7 +4,8 @@ from .rule_mixin import RuleMixin from .serializable_mixin import SerializableMixin from .media_artifact_file_output_mixin import BlobArtifactFileOutputMixin -from .event_publisher_mixin import EventPublisherMixin +from .futures_executor_mixin import FuturesExecutorMixin +from .singleton_mixin import SingletonMixin __all__ = [ "ActivityMixin", @@ -13,5 +14,6 @@ "RuleMixin", "BlobArtifactFileOutputMixin", "SerializableMixin", - "EventPublisherMixin", + "FuturesExecutorMixin", + "SingletonMixin", ] diff --git a/griptape/mixins/activity_mixin.py b/griptape/mixins/activity_mixin.py index 2caee49e4..61e8076b1 100644 --- a/griptape/mixins/activity_mixin.py +++ b/griptape/mixins/activity_mixin.py @@ -1,18 +1,27 @@ from __future__ import annotations import inspect +from copy import deepcopy from typing import 
Callable, Optional -import schema from attrs import Attribute, define, field from jinja2 import Template -from schema import Literal, Schema +from schema import Schema @define(slots=False) class ActivityMixin: + """Provides Tool Activity management functionality to Tools. + + Attributes: + allowlist: List of Tool Activities to include in the Tool schema. + denylist: List of Tool Activities to remove from the Tool schema. + extra_schema_properties: Mapping of Activity name and extra properties to include in the activity's schema. + """ + allowlist: Optional[list[str]] = field(default=None, kw_only=True) denylist: Optional[list[str]] = field(default=None, kw_only=True) + extra_schema_properties: Optional[dict[str, dict]] = field(default=None, kw_only=True) @allowlist.validator # pyright: ignore[reportAttributeAccessIssue] def validate_allowlist(self, _: Attribute, allowlist: Optional[list[str]]) -> None: @@ -68,32 +77,27 @@ def find_activity(self, name: str) -> Optional[Callable]: def activity_name(self, activity: Callable) -> str: if activity is None or not getattr(activity, "is_activity", False): raise Exception("This method is not an activity.") - else: - return getattr(activity, "name") + return getattr(activity, "name") def activity_description(self, activity: Callable) -> str: if activity is None or not getattr(activity, "is_activity", False): raise Exception("This method is not an activity.") - else: - return Template(getattr(activity, "config")["description"]).render({"_self": self}) + return Template(getattr(activity, "config")["description"]).render({"_self": self}) def activity_schema(self, activity: Callable) -> Optional[Schema]: if activity is None or not getattr(activity, "is_activity", False): raise Exception("This method is not an activity.") - elif getattr(activity, "config")["schema"]: - full_schema = { - "values": getattr(activity, "config")["schema"].schema if getattr(activity, "config")["schema"] else {}, - } + if getattr(activity, "config")["schema"] is not None: + # Need to deepcopy to avoid modifying the original schema + config_schema = deepcopy(getattr(activity, "config")["schema"]) + activity_name = self.activity_name(activity) - return Schema(full_schema) - else: - return None + if self.extra_schema_properties is not None and activity_name in self.extra_schema_properties: + config_schema.schema.update(self.extra_schema_properties[activity_name]) - def activity_to_input(self, activity: Callable) -> dict: - if self.activity_schema(activity): - return {Literal("input"): {"values": getattr(activity, "config")["schema"]}} + return Schema({"values": config_schema}) else: - return {schema.Optional("input"): {}} + return None def _validate_tool_activity(self, activity_name: str) -> None: tool = self.__class__ diff --git a/griptape/mixins/futures_executor_mixin.py b/griptape/mixins/futures_executor_mixin.py new file mode 100644 index 000000000..8fa3ed168 --- /dev/null +++ b/griptape/mixins/futures_executor_mixin.py @@ -0,0 +1,27 @@ +from __future__ import annotations + +from abc import ABC +from concurrent import futures +from typing import Callable, Optional + +from attrs import Factory, define, field + + +@define(slots=False, kw_only=True) +class FuturesExecutorMixin(ABC): + futures_executor_fn: Callable[[], futures.Executor] = field( + default=Factory(lambda: lambda: futures.ThreadPoolExecutor()), + ) + + futures_executor: Optional[futures.Executor] = field( + default=Factory(lambda self: self.futures_executor_fn(), takes_self=True) + ) + + def __del__(self) -> None: + 
if hasattr(self, "futures_executor"): + executor = self.futures_executor + + if executor is not None: + self.futures_executor = None + + executor.shutdown(wait=True) diff --git a/griptape/mixins/singleton_mixin.py b/griptape/mixins/singleton_mixin.py new file mode 100644 index 000000000..1d565ceec --- /dev/null +++ b/griptape/mixins/singleton_mixin.py @@ -0,0 +1,10 @@ +from __future__ import annotations + + +class SingletonMixin: + _instance = None + + def __new__(cls, *args, **kwargs) -> SingletonMixin: + if not cls._instance: + cls._instance = super().__new__(cls, *args, **kwargs) # noqa: UP008 + return cls._instance diff --git a/griptape/schemas/base_schema.py b/griptape/schemas/base_schema.py index f2172b119..f25e8870b 100644 --- a/griptape/schemas/base_schema.py +++ b/griptape/schemas/base_schema.py @@ -36,7 +36,7 @@ def make_obj(self, data: Any, **kwargs) -> Any: cls._resolve_types(attrs_cls) return SubSchema.from_dict( { - a.name: cls._get_field_for_type(a.type) + a.alias or a.name: cls._get_field_for_type(a.type) for a in attrs.fields(attrs_cls) if a.metadata.get("serializable") }, diff --git a/griptape/structures/agent.py b/griptape/structures/agent.py index b133a7b6b..24f57395c 100644 --- a/griptape/structures/agent.py +++ b/griptape/structures/agent.py @@ -2,16 +2,18 @@ from typing import TYPE_CHECKING, Callable, Optional -from attrs import Attribute, define, field +from attrs import Attribute, Factory, define, field from griptape.artifacts.text_artifact import TextArtifact from griptape.common import observable +from griptape.configs import Defaults from griptape.memory.structure import Run from griptape.structures import Structure from griptape.tasks import PromptTask, ToolkitTask if TYPE_CHECKING: from griptape.artifacts import BaseArtifact + from griptape.drivers import BasePromptDriver from griptape.tasks import BaseTask from griptape.tools import BaseTool @@ -21,6 +23,10 @@ class Agent(Structure): input: str | list | tuple | BaseArtifact | Callable[[BaseTask], BaseArtifact] = field( default=lambda task: task.full_context["args"][0] if task.full_context["args"] else TextArtifact(value=""), ) + stream: bool = field(default=Factory(lambda: Defaults.drivers_config.prompt_driver.stream), kw_only=True) + prompt_driver: BasePromptDriver = field( + default=Factory(lambda: Defaults.drivers_config.prompt_driver), kw_only=True + ) tools: list[BaseTool] = field(factory=list, kw_only=True) max_meta_memory_entries: Optional[int] = field(default=20, kw_only=True) fail_fast: bool = field(default=False, kw_only=True) @@ -32,11 +38,20 @@ def validate_fail_fast(self, _: Attribute, fail_fast: bool) -> None: # noqa: FB def __attrs_post_init__(self) -> None: super().__attrs_post_init__() + + self.prompt_driver.stream = self.stream if len(self.tasks) == 0: if self.tools: - task = ToolkitTask(self.input, tools=self.tools, max_meta_memory_entries=self.max_meta_memory_entries) + task = ToolkitTask( + self.input, + prompt_driver=self.prompt_driver, + tools=self.tools, + max_meta_memory_entries=self.max_meta_memory_entries, + ) else: - task = PromptTask(self.input, max_meta_memory_entries=self.max_meta_memory_entries) + task = PromptTask( + self.input, prompt_driver=self.prompt_driver, max_meta_memory_entries=self.max_meta_memory_entries + ) self.add_task(task) diff --git a/griptape/structures/pipeline.py b/griptape/structures/pipeline.py index 0aed369bb..e89d83818 100644 --- a/griptape/structures/pipeline.py +++ b/griptape/structures/pipeline.py @@ -16,6 +16,9 @@ @define class Pipeline(Structure): 
def add_task(self, task: BaseTask) -> BaseTask: + if (existing_task := self.try_find_task(task.id)) is not None: + return existing_task + task.preprocess(self) if self.output_task: diff --git a/griptape/structures/structure.py b/griptape/structures/structure.py index 765910f5c..0572e289d 100644 --- a/griptape/structures/structure.py +++ b/griptape/structures/structure.py @@ -1,81 +1,41 @@ from __future__ import annotations -import logging import uuid from abc import ABC, abstractmethod -from logging import Logger from typing import TYPE_CHECKING, Any, Optional from attrs import Attribute, Factory, define, field -from rich.logging import RichHandler -from griptape.artifacts import BaseArtifact, BlobArtifact, TextArtifact from griptape.common import observable -from griptape.config import BaseStructureConfig, OpenAiStructureConfig, StructureConfig -from griptape.drivers import ( - BaseEmbeddingDriver, - BasePromptDriver, - LocalVectorStoreDriver, - OpenAiChatPromptDriver, - OpenAiEmbeddingDriver, -) -from griptape.engines import CsvExtractionEngine, JsonExtractionEngine, PromptSummaryEngine -from griptape.engines.rag import RagEngine -from griptape.engines.rag.modules import ( - MetadataBeforeResponseRagModule, - PromptResponseRagModule, - RulesetsBeforeResponseRagModule, - VectorStoreRetrievalRagModule, -) -from griptape.engines.rag.stages import ResponseRagStage, RetrievalRagStage -from griptape.events.finish_structure_run_event import FinishStructureRunEvent -from griptape.events.start_structure_run_event import StartStructureRunEvent +from griptape.events import EventBus, FinishStructureRunEvent, StartStructureRunEvent from griptape.memory import TaskMemory from griptape.memory.meta import MetaMemory from griptape.memory.structure import ConversationMemory -from griptape.memory.task.storage import BlobArtifactStorage, TextArtifactStorage -from griptape.mixins import EventPublisherMixin -from griptape.utils import deprecation_warn if TYPE_CHECKING: + from griptape.artifacts import BaseArtifact from griptape.memory.structure import BaseConversationMemory from griptape.rules import Rule, Ruleset from griptape.tasks import BaseTask @define -class Structure(ABC, EventPublisherMixin): - LOGGER_NAME = "griptape" - +class Structure(ABC): id: str = field(default=Factory(lambda: uuid.uuid4().hex), kw_only=True) - stream: Optional[bool] = field(default=None, kw_only=True) - prompt_driver: Optional[BasePromptDriver] = field(default=None) - embedding_driver: Optional[BaseEmbeddingDriver] = field(default=None, kw_only=True) - config: BaseStructureConfig = field( - default=Factory(lambda self: self.default_config, takes_self=True), - kw_only=True, - ) rulesets: list[Ruleset] = field(factory=list, kw_only=True) rules: list[Rule] = field(factory=list, kw_only=True) tasks: list[BaseTask] = field(factory=list, kw_only=True) - custom_logger: Optional[Logger] = field(default=None, kw_only=True) - logger_level: int = field(default=logging.INFO, kw_only=True) conversation_memory: Optional[BaseConversationMemory] = field( - default=Factory( - lambda self: ConversationMemory(driver=self.config.conversation_memory_driver), - takes_self=True, - ), + default=Factory(lambda: ConversationMemory()), kw_only=True, ) - rag_engine: RagEngine = field(default=Factory(lambda self: self.default_rag_engine, takes_self=True), kw_only=True) task_memory: TaskMemory = field( - default=Factory(lambda self: self.default_task_memory, takes_self=True), + default=Factory(lambda self: TaskMemory(), takes_self=True), kw_only=True, ) 
meta_memory: MetaMemory = field(default=Factory(lambda: MetaMemory()), kw_only=True) fail_fast: bool = field(default=True, kw_only=True) _execution_args: tuple = () - _logger: Optional[Logger] = None @rulesets.validator # pyright: ignore[reportAttributeAccessIssue] def validate_rulesets(self, _: Attribute, rulesets: list[Ruleset]) -> None: @@ -97,8 +57,6 @@ def __attrs_post_init__(self) -> None: if self.conversation_memory is not None: self.conversation_memory.structure = self - self.config.structure = self - tasks = self.tasks.copy() self.tasks.clear() self.add_tasks(*tasks) @@ -106,39 +64,10 @@ def __attrs_post_init__(self) -> None: def __add__(self, other: BaseTask | list[BaseTask]) -> list[BaseTask]: return self.add_tasks(*other) if isinstance(other, list) else self + [other] - @prompt_driver.validator # pyright: ignore[reportAttributeAccessIssue] - def validate_prompt_driver(self, attribute: Attribute, value: BasePromptDriver) -> None: - if value is not None: - deprecation_warn(f"`{attribute.name}` is deprecated, use `config.prompt_driver` instead.") - - @embedding_driver.validator # pyright: ignore[reportAttributeAccessIssue] - def validate_embedding_driver(self, attribute: Attribute, value: BaseEmbeddingDriver) -> None: - if value is not None: - deprecation_warn(f"`{attribute.name}` is deprecated, use `config.embedding_driver` instead.") - - @stream.validator # pyright: ignore[reportAttributeAccessIssue] - def validate_stream(self, attribute: Attribute, value: bool) -> None: # noqa: FBT001 - if value is not None: - deprecation_warn(f"`{attribute.name}` is deprecated, use `config.prompt_driver.stream` instead.") - @property def execution_args(self) -> tuple: return self._execution_args - @property - def logger(self) -> Logger: - if self.custom_logger: - return self.custom_logger - else: - if self._logger is None: - self._logger = logging.getLogger(self.LOGGER_NAME) - - self._logger.propagate = False - self._logger.level = self.logger_level - - self._logger.handlers = [RichHandler(show_time=True, show_path=False)] - return self._logger - @property def input_task(self) -> Optional[BaseTask]: return self.tasks[0] if self.tasks else None @@ -155,59 +84,6 @@ def output(self) -> Optional[BaseArtifact]: def finished_tasks(self) -> list[BaseTask]: return [s for s in self.tasks if s.is_finished()] - @property - def default_config(self) -> BaseStructureConfig: - if self.prompt_driver is not None or self.embedding_driver is not None or self.stream is not None: - config = StructureConfig() - - prompt_driver = OpenAiChatPromptDriver(model="gpt-4o") if self.prompt_driver is None else self.prompt_driver - - embedding_driver = OpenAiEmbeddingDriver() if self.embedding_driver is None else self.embedding_driver - - if self.stream is not None: - prompt_driver.stream = self.stream - - vector_store_driver = LocalVectorStoreDriver(embedding_driver=embedding_driver) - - config.prompt_driver = prompt_driver - config.vector_store_driver = vector_store_driver - config.embedding_driver = embedding_driver - else: - config = OpenAiStructureConfig() - - return config - - @property - def default_rag_engine(self) -> RagEngine: - return RagEngine( - retrieval_stage=RetrievalRagStage( - retrieval_modules=[VectorStoreRetrievalRagModule(vector_store_driver=self.config.vector_store_driver)], - ), - response_stage=ResponseRagStage( - before_response_modules=[ - RulesetsBeforeResponseRagModule(rulesets=self.rulesets), - MetadataBeforeResponseRagModule(), - ], - 
response_module=PromptResponseRagModule(prompt_driver=self.config.prompt_driver), - ), - ) - - @property - def default_task_memory(self) -> TaskMemory: - return TaskMemory( - artifact_storages={ - TextArtifact: TextArtifactStorage( - rag_engine=self.rag_engine, - retrieval_rag_module_name="VectorStoreRetrievalRagModule", - vector_store_driver=self.config.vector_store_driver, - summary_engine=PromptSummaryEngine(prompt_driver=self.config.prompt_driver), - csv_extraction_engine=CsvExtractionEngine(prompt_driver=self.config.prompt_driver), - json_extraction_engine=JsonExtractionEngine(prompt_driver=self.config.prompt_driver), - ), - BlobArtifact: BlobArtifactStorage(), - }, - ) - def is_finished(self) -> bool: return all(s.is_finished() for s in self.tasks) @@ -215,10 +91,15 @@ def is_executing(self) -> bool: return any(s for s in self.tasks if s.is_executing()) def find_task(self, task_id: str) -> BaseTask: + if (task := self.try_find_task(task_id)) is not None: + return task + raise ValueError(f"Task with id {task_id} doesn't exist.") + + def try_find_task(self, task_id: str) -> Optional[BaseTask]: for task in self.tasks: if task.id == task_id: return task - raise ValueError(f"Task with id {task_id} doesn't exist.") + return None def add_tasks(self, *tasks: BaseTask) -> list[BaseTask]: return [self.add_task(s) for s in tasks] @@ -227,7 +108,11 @@ def context(self, task: BaseTask) -> dict[str, Any]: return {"args": self.execution_args, "structure": self} def resolve_relationships(self) -> None: - task_by_id = {task.id: task for task in self.tasks} + task_by_id = {} + for task in self.tasks: + if task.id in task_by_id: + raise ValueError(f"Duplicate task with id {task.id} found.") + task_by_id[task.id] = task for task in self.tasks: # Ensure parents include this task as a child @@ -252,7 +137,7 @@ def before_run(self, args: Any) -> None: [task.reset() for task in self.tasks] - self.publish_event( + EventBus.publish_event( StartStructureRunEvent( structure_id=self.id, input_task_input=self.input_task.input, @@ -264,7 +149,7 @@ def before_run(self, args: Any) -> None: @observable def after_run(self) -> None: - self.publish_event( + EventBus.publish_event( FinishStructureRunEvent( structure_id=self.id, output_task_input=self.output_task.input, diff --git a/griptape/structures/workflow.py b/griptape/structures/workflow.py index 89f8305ca..f1e1ec86b 100644 --- a/griptape/structures/workflow.py +++ b/griptape/structures/workflow.py @@ -1,14 +1,15 @@ from __future__ import annotations import concurrent.futures as futures -from typing import TYPE_CHECKING, Any, Callable, Optional +from typing import TYPE_CHECKING, Any, Optional -from attrs import Factory, define, field +from attrs import define from graphlib import TopologicalSorter from griptape.artifacts import ErrorArtifact from griptape.common import observable from griptape.memory.structure import Run +from griptape.mixins import FuturesExecutorMixin from griptape.structures import Structure if TYPE_CHECKING: @@ -16,17 +17,19 @@ @define -class Workflow(Structure): - futures_executor_fn: Callable[[], futures.Executor] = field( - default=Factory(lambda: lambda: futures.ThreadPoolExecutor()), - kw_only=True, - ) +class Workflow(Structure, FuturesExecutorMixin): + @property + def input_task(self) -> Optional[BaseTask]: + return self.order_tasks()[0] if self.tasks else None @property def output_task(self) -> Optional[BaseTask]: return self.order_tasks()[-1] if self.tasks else None def add_task(self, task: BaseTask) -> BaseTask: + if (existing_task := 
self.try_find_task(task.id)) is not None: + return existing_task + task.preprocess(self) self.tasks.append(task) @@ -87,22 +90,21 @@ def insert_task( def try_run(self, *args) -> Workflow: exit_loop = False - with self.futures_executor_fn() as executor: - while not self.is_finished() and not exit_loop: - futures_list = {} - ordered_tasks = self.order_tasks() + while not self.is_finished() and not exit_loop: + futures_list = {} + ordered_tasks = self.order_tasks() - for task in ordered_tasks: - if task.can_execute(): - future = executor.submit(task.execute) - futures_list[future] = task + for task in ordered_tasks: + if task.can_execute(): + future = self.futures_executor.submit(task.execute) + futures_list[future] = task - # Wait for all tasks to complete - for future in futures.as_completed(futures_list): - if isinstance(future.result(), ErrorArtifact) and self.fail_fast: - exit_loop = True + # Wait for all tasks to complete + for future in futures.as_completed(futures_list): + if isinstance(future.result(), ErrorArtifact) and self.fail_fast: + exit_loop = True - break + break if self.conversation_memory and self.output is not None: run = Run(input=self.input_task.input, output=self.output) diff --git a/griptape/tasks/__init__.py b/griptape/tasks/__init__.py index 764d1669a..7d08cf858 100644 --- a/griptape/tasks/__init__.py +++ b/griptape/tasks/__init__.py @@ -8,8 +8,6 @@ from .tool_task import ToolTask from .rag_task import RagTask from .extraction_task import ExtractionTask -from .csv_extraction_task import CsvExtractionTask -from .json_extraction_task import JsonExtractionTask from .base_image_generation_task import BaseImageGenerationTask from .code_execution_task import CodeExecutionTask from .prompt_image_generation_task import PromptImageGenerationTask @@ -33,8 +31,6 @@ "ToolTask", "RagTask", "ExtractionTask", - "CsvExtractionTask", - "JsonExtractionTask", "BaseImageGenerationTask", "CodeExecutionTask", "PromptImageGenerationTask", diff --git a/griptape/tasks/actions_subtask.py b/griptape/tasks/actions_subtask.py index d691906c9..7cdb5d4de 100644 --- a/griptape/tasks/actions_subtask.py +++ b/griptape/tasks/actions_subtask.py @@ -1,6 +1,7 @@ from __future__ import annotations import json +import logging import re from typing import TYPE_CHECKING, Callable, Optional @@ -10,7 +11,8 @@ from griptape import utils from griptape.artifacts import ActionArtifact, BaseArtifact, ErrorArtifact, ListArtifact, TextArtifact from griptape.common import ToolAction -from griptape.events import FinishActionsSubtaskEvent, StartActionsSubtaskEvent +from griptape.configs import Defaults +from griptape.events import EventBus, FinishActionsSubtaskEvent, StartActionsSubtaskEvent from griptape.mixins import ActionsSubtaskOriginMixin from griptape.tasks import BaseTask from griptape.utils import remove_null_values_in_dict_recursively @@ -18,6 +20,8 @@ if TYPE_CHECKING: from griptape.memory import TaskMemory +logger = logging.getLogger(Defaults.logging_config.logger_name) + @define class ActionsSubtask(BaseTask): @@ -64,6 +68,16 @@ def children(self) -> list[BaseTask]: else: raise Exception("ActionSubtask must be attached to a Task that implements ActionSubtaskOriginMixin.") + def add_child(self, child: BaseTask) -> BaseTask: + if child.id not in self.child_ids: + self.child_ids.append(child.id) + return child + + def add_parent(self, parent: BaseTask) -> BaseTask: + if parent.id not in self.parent_ids: + self.parent_ids.append(parent.id) + return parent + def attach_to(self, parent_task: BaseTask) -> None: 
self.parent_task_id = parent_task.id self.structure = parent_task.structure @@ -74,12 +88,12 @@ def attach_to(self, parent_task: BaseTask) -> None: else: self.__init_from_artifacts(self.input) except Exception as e: - self.structure.logger.error("Subtask %s\nError parsing tool action: %s", self.origin_task.id, e) + logger.error("Subtask %s\nError parsing tool action: %s", self.origin_task.id, e) self.output = ErrorArtifact(f"ToolAction input parsing error: {e}", exception=e) def before_run(self) -> None: - self.structure.publish_event( + EventBus.publish_event( StartActionsSubtaskEvent( task_id=self.id, task_parent_ids=self.parent_ids, @@ -97,7 +111,7 @@ def before_run(self) -> None: *([f"\nThought: {self.thought}"] if self.thought is not None else []), f"\nActions: {self.actions_to_json()}", ] - self.structure.logger.info("".join(parts)) + logger.info("".join(parts)) def run(self) -> BaseArtifact: try: @@ -116,7 +130,7 @@ def run(self) -> BaseArtifact: actions_output.append(output) self.output = ListArtifact(actions_output) except Exception as e: - self.structure.logger.exception("Subtask %s\n%s", self.id, e) + logger.exception("Subtask %s\n%s", self.id, e) self.output = ErrorArtifact(str(e), exception=e) if self.output is not None: @@ -125,10 +139,7 @@ def run(self) -> BaseArtifact: return ErrorArtifact("no tool output") def execute_actions(self, actions: list[ToolAction]) -> list[tuple[str, BaseArtifact]]: - with self.futures_executor_fn() as executor: - results = utils.execute_futures_dict({a.tag: executor.submit(self.execute_action, a) for a in actions}) - - return list(results.values()) + return utils.execute_futures_list([self.futures_executor.submit(self.execute_action, a) for a in actions]) def execute_action(self, action: ToolAction) -> tuple[str, BaseArtifact]: if action.tool is not None: @@ -145,7 +156,7 @@ def execute_action(self, action: ToolAction) -> tuple[str, BaseArtifact]: def after_run(self) -> None: response = self.output.to_text() if isinstance(self.output, BaseArtifact) else str(self.output) - self.structure.publish_event( + EventBus.publish_event( FinishActionsSubtaskEvent( task_id=self.id, task_parent_ids=self.parent_ids, @@ -157,7 +168,7 @@ def after_run(self) -> None: subtask_actions=self.actions_to_dicts(), ), ) - self.structure.logger.info("Subtask %s\nResponse: %s", self.id, response) + logger.info("Subtask %s\nResponse: %s", self.id, response) def actions_to_dicts(self) -> list[dict]: json_list = [] @@ -245,7 +256,7 @@ def __parse_actions(self, actions_matches: list[str]) -> None: self.actions = [self.__process_action_object(action_object) for action_object in actions_list] except json.JSONDecodeError as e: - self.structure.logger.exception("Subtask %s\nInvalid actions JSON: %s", self.origin_task.id, e) + logger.exception("Subtask %s\nInvalid actions JSON: %s", self.origin_task.id, e) self.output = ErrorArtifact(f"Actions JSON decoding error: {e}", exception=e) @@ -302,10 +313,10 @@ def __validate_action(self, action: ToolAction) -> None: if activity_schema: activity_schema.validate(action.input) except schema.SchemaError as e: - self.structure.logger.exception("Subtask %s\nInvalid action JSON: %s", self.origin_task.id, e) + logger.exception("Subtask %s\nInvalid action JSON: %s", self.origin_task.id, e) action.output = ErrorArtifact(f"Activity input JSON validation error: {e}", exception=e) except SyntaxError as e: - self.structure.logger.exception("Subtask %s\nSyntax error: %s", self.origin_task.id, e) + logger.exception("Subtask %s\nSyntax error: %s", 
self.origin_task.id, e) action.output = ErrorArtifact(f"Syntax error: {e}", exception=e) diff --git a/griptape/tasks/audio_transcription_task.py b/griptape/tasks/audio_transcription_task.py index 3a4b17b9e..3d83cf7e7 100644 --- a/griptape/tasks/audio_transcription_task.py +++ b/griptape/tasks/audio_transcription_task.py @@ -2,7 +2,7 @@ from typing import TYPE_CHECKING -from attrs import define, field +from attrs import Factory, define, field from griptape.engines import AudioTranscriptionEngine from griptape.tasks.base_audio_input_task import BaseAudioInputTask @@ -13,26 +13,10 @@ @define class AudioTranscriptionTask(BaseAudioInputTask): - _audio_transcription_engine: AudioTranscriptionEngine = field( - default=None, + audio_transcription_engine: AudioTranscriptionEngine = field( + default=Factory(lambda: AudioTranscriptionEngine()), kw_only=True, - alias="audio_transcription_engine", ) - @property - def audio_transcription_engine(self) -> AudioTranscriptionEngine: - if self._audio_transcription_engine is None: - if self.structure is not None: - self._audio_transcription_engine = AudioTranscriptionEngine( - audio_transcription_driver=self.structure.config.audio_transcription_driver, - ) - else: - raise ValueError("Audio Generation Engine is not set.") - return self._audio_transcription_engine - - @audio_transcription_engine.setter - def audio_transcription_engine(self, value: AudioTranscriptionEngine) -> None: - self._audio_transcription_engine = value - def run(self) -> TextArtifact: return self.audio_transcription_engine.run(self.input) diff --git a/griptape/tasks/base_audio_generation_task.py b/griptape/tasks/base_audio_generation_task.py index d2657561d..519a1a59a 100644 --- a/griptape/tasks/base_audio_generation_task.py +++ b/griptape/tasks/base_audio_generation_task.py @@ -1,21 +1,25 @@ from __future__ import annotations +import logging from abc import ABC from attrs import define +from griptape.configs import Defaults from griptape.mixins import BlobArtifactFileOutputMixin, RuleMixin from griptape.tasks import BaseTask +logger = logging.getLogger(Defaults.logging_config.logger_name) + @define class BaseAudioGenerationTask(BlobArtifactFileOutputMixin, RuleMixin, BaseTask, ABC): def before_run(self) -> None: super().before_run() - self.structure.logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, self.input.to_text()) + logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, self.input.to_text()) def after_run(self) -> None: super().after_run() - self.structure.logger.info("%s %s\nOutput: %s", self.__class__.__name__, self.id, self.output.to_text()) + logger.info("%s %s\nOutput: %s", self.__class__.__name__, self.id, self.output.to_text()) diff --git a/griptape/tasks/base_audio_input_task.py b/griptape/tasks/base_audio_input_task.py index 517c03a15..e39f70fcd 100644 --- a/griptape/tasks/base_audio_input_task.py +++ b/griptape/tasks/base_audio_input_task.py @@ -1,14 +1,18 @@ from __future__ import annotations +import logging from abc import ABC from typing import Callable from attrs import define, field from griptape.artifacts.audio_artifact import AudioArtifact +from griptape.configs import Defaults from griptape.mixins import RuleMixin from griptape.tasks import BaseTask +logger = logging.getLogger(Defaults.logging_config.logger_name) + @define class BaseAudioInputTask(RuleMixin, BaseTask, ABC): @@ -30,9 +34,9 @@ def input(self, value: AudioArtifact | Callable[[BaseTask], AudioArtifact]) -> N def before_run(self) -> None: super().before_run() - 
self.structure.logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, self.input.to_text()) + logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, self.input.to_text()) def after_run(self) -> None: super().after_run() - self.structure.logger.info("%s %s\nOutput: %s", self.__class__.__name__, self.id, self.output.to_text()) + logger.info("%s %s\nOutput: %s", self.__class__.__name__, self.id, self.output.to_text()) diff --git a/griptape/tasks/base_image_generation_task.py b/griptape/tasks/base_image_generation_task.py index d32e8f142..f0c1f0e7e 100644 --- a/griptape/tasks/base_image_generation_task.py +++ b/griptape/tasks/base_image_generation_task.py @@ -1,5 +1,6 @@ from __future__ import annotations +import logging import os from abc import ABC from pathlib import Path @@ -7,6 +8,7 @@ from attrs import Attribute, define, field +from griptape.configs import Defaults from griptape.loaders import ImageLoader from griptape.mixins import BlobArtifactFileOutputMixin, RuleMixin from griptape.rules import Rule, Ruleset @@ -16,6 +18,9 @@ from griptape.artifacts import MediaArtifact +logger = logging.getLogger(Defaults.logging_config.logger_name) + + @define class BaseImageGenerationTask(BlobArtifactFileOutputMixin, RuleMixin, BaseTask, ABC): """Provides a base class for image generation-related tasks. @@ -60,5 +65,5 @@ def all_negative_rulesets(self) -> list[Ruleset]: return task_rulesets def _read_from_file(self, path: str) -> MediaArtifact: - self.structure.logger.info("Reading image from %s", os.path.abspath(path)) + logger.info("Reading image from %s", os.path.abspath(path)) return ImageLoader().load(Path(path).read_bytes()) diff --git a/griptape/tasks/base_multi_text_input_task.py b/griptape/tasks/base_multi_text_input_task.py index a0d8cb9ac..347dd7e29 100644 --- a/griptape/tasks/base_multi_text_input_task.py +++ b/griptape/tasks/base_multi_text_input_task.py @@ -1,15 +1,19 @@ from __future__ import annotations +import logging from abc import ABC from typing import Callable from attrs import Factory, define, field from griptape.artifacts import ListArtifact, TextArtifact +from griptape.configs import Defaults from griptape.mixins.rule_mixin import RuleMixin from griptape.tasks import BaseTask from griptape.utils import J2 +logger = logging.getLogger(Defaults.logging_config.logger_name) + @define class BaseMultiTextInputTask(RuleMixin, BaseTask, ABC): @@ -48,9 +52,9 @@ def before_run(self) -> None: super().before_run() joined_input = "\n".join([i.to_text() for i in self.input]) - self.structure.logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, joined_input) + logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, joined_input) def after_run(self) -> None: super().after_run() - self.structure.logger.info("%s %s\nOutput: %s", self.__class__.__name__, self.id, self.output.to_text()) + logger.info("%s %s\nOutput: %s", self.__class__.__name__, self.id, self.output.to_text()) diff --git a/griptape/tasks/base_task.py b/griptape/tasks/base_task.py index 299fe5dfe..535b3a92d 100644 --- a/griptape/tasks/base_task.py +++ b/griptape/tasks/base_task.py @@ -1,24 +1,28 @@ from __future__ import annotations +import logging import uuid from abc import ABC, abstractmethod -from concurrent import futures from enum import Enum -from typing import TYPE_CHECKING, Any, Callable, Optional +from typing import TYPE_CHECKING, Any, Optional from attrs import Factory, define, field from griptape.artifacts import ErrorArtifact -from griptape.events import FinishTaskEvent, 
StartTaskEvent +from griptape.configs import Defaults +from griptape.events import EventBus, FinishTaskEvent, StartTaskEvent +from griptape.mixins import FuturesExecutorMixin if TYPE_CHECKING: from griptape.artifacts import BaseArtifact from griptape.memory.meta import BaseMetaEntry from griptape.structures import Structure +logger = logging.getLogger(Defaults.logging_config.logger_name) + @define -class BaseTask(ABC): +class BaseTask(FuturesExecutorMixin, ABC): class State(Enum): PENDING = 1 EXECUTING = 2 @@ -29,14 +33,24 @@ class State(Enum): parent_ids: list[str] = field(factory=list, kw_only=True) child_ids: list[str] = field(factory=list, kw_only=True) max_meta_memory_entries: Optional[int] = field(default=20, kw_only=True) + structure: Optional[Structure] = field(default=None, kw_only=True) output: Optional[BaseArtifact] = field(default=None, init=False) - structure: Optional[Structure] = field(default=None, init=False) context: dict[str, Any] = field(factory=dict, kw_only=True) - futures_executor_fn: Callable[[], futures.Executor] = field( - default=Factory(lambda: lambda: futures.ThreadPoolExecutor()), - kw_only=True, - ) + + def __rshift__(self, other: BaseTask) -> BaseTask: + self.add_child(other) + + return other + + def __lshift__(self, other: BaseTask) -> BaseTask: + self.add_parent(other) + + return other + + def __attrs_post_init__(self) -> None: + if self.structure is not None: + self.structure.add_task(self) @property @abstractmethod @@ -44,11 +58,15 @@ def input(self) -> BaseArtifact: ... @property def parents(self) -> list[BaseTask]: - return [self.structure.find_task(parent_id) for parent_id in self.parent_ids] + if self.structure is not None: + return [self.structure.find_task(parent_id) for parent_id in self.parent_ids] + raise ValueError("Structure must be set to access parents") @property def children(self) -> list[BaseTask]: - return [self.structure.find_task(child_id) for child_id in self.child_ids] + if self.structure is not None: + return [self.structure.find_task(child_id) for child_id in self.child_ids] + raise ValueError("Structure must be set to access children") @property def parent_outputs(self) -> dict[str, str]: @@ -71,25 +89,37 @@ def meta_memories(self) -> list[BaseMetaEntry]: def __str__(self) -> str: return str(self.output.value) - def add_parents(self, parents: list[str | BaseTask]) -> None: + def add_parents(self, parents: list[BaseTask]) -> None: for parent in parents: self.add_parent(parent) - def add_parent(self, parent: str | BaseTask) -> None: - parent_id = parent if isinstance(parent, str) else parent.id + def add_parent(self, parent: BaseTask) -> BaseTask: + if parent.id not in self.parent_ids: + self.parent_ids.append(parent.id) - if parent_id not in self.parent_ids: - self.parent_ids.append(parent_id) + if self.id not in parent.child_ids: + parent.child_ids.append(self.id) - def add_children(self, children: list[str | BaseTask]) -> None: + if self.structure is not None: + self.structure.add_task(parent) + + return self + + def add_children(self, children: list[BaseTask]) -> None: for child in children: self.add_child(child) - def add_child(self, child: str | BaseTask) -> None: - child_id = child if isinstance(child, str) else child.id + def add_child(self, child: BaseTask) -> BaseTask: + if child.id not in self.child_ids: + self.child_ids.append(child.id) - if child_id not in self.child_ids: - self.child_ids.append(child_id) + if self.id not in child.parent_ids: + child.parent_ids.append(self.id) + + if self.structure is not None: + 
self.structure.add_task(child) + + return self def preprocess(self, structure: Structure) -> BaseTask: self.structure = structure @@ -107,7 +137,7 @@ def is_executing(self) -> bool: def before_run(self) -> None: if self.structure is not None: - self.structure.publish_event( + EventBus.publish_event( StartTaskEvent( task_id=self.id, task_parent_ids=self.parent_ids, @@ -119,7 +149,7 @@ def before_run(self) -> None: def after_run(self) -> None: if self.structure is not None: - self.structure.publish_event( + EventBus.publish_event( FinishTaskEvent( task_id=self.id, task_parent_ids=self.parent_ids, @@ -139,7 +169,7 @@ def execute(self) -> Optional[BaseArtifact]: self.after_run() except Exception as e: - self.structure.logger.exception("%s %s\n%s", self.__class__.__name__, self.id, e) + logger.exception("%s %s\n%s", self.__class__.__name__, self.id, e) self.output = ErrorArtifact(str(e), exception=e) finally: diff --git a/griptape/tasks/base_text_input_task.py b/griptape/tasks/base_text_input_task.py index 90f60efcd..dfed85bcf 100644 --- a/griptape/tasks/base_text_input_task.py +++ b/griptape/tasks/base_text_input_task.py @@ -1,15 +1,19 @@ from __future__ import annotations +import logging from abc import ABC from typing import Callable from attrs import define, field from griptape.artifacts import TextArtifact +from griptape.configs import Defaults from griptape.mixins.rule_mixin import RuleMixin from griptape.tasks import BaseTask from griptape.utils import J2 +logger = logging.getLogger(Defaults.logging_config.logger_name) + @define class BaseTextInputTask(RuleMixin, BaseTask, ABC): @@ -36,9 +40,9 @@ def input(self, value: str | TextArtifact | Callable[[BaseTask], TextArtifact]) def before_run(self) -> None: super().before_run() - self.structure.logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, self.input.to_text()) + logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, self.input.to_text()) def after_run(self) -> None: super().after_run() - self.structure.logger.info("%s %s\nOutput: %s", self.__class__.__name__, self.id, self.output.to_text()) + logger.info("%s %s\nOutput: %s", self.__class__.__name__, self.id, self.output.to_text()) diff --git a/griptape/tasks/csv_extraction_task.py b/griptape/tasks/csv_extraction_task.py deleted file mode 100644 index 538596dfe..000000000 --- a/griptape/tasks/csv_extraction_task.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import annotations - -from attrs import define, field - -from griptape.engines import CsvExtractionEngine -from griptape.tasks import ExtractionTask - - -@define -class CsvExtractionTask(ExtractionTask): - _extraction_engine: CsvExtractionEngine = field(default=None, kw_only=True, alias="extraction_engine") - - @property - def extraction_engine(self) -> CsvExtractionEngine: - if self._extraction_engine is None: - if self.structure is not None: - self._extraction_engine = CsvExtractionEngine(prompt_driver=self.structure.config.prompt_driver) - else: - raise ValueError("Extraction Engine is not set.") - return self._extraction_engine - - @extraction_engine.setter - def extraction_engine(self, value: CsvExtractionEngine) -> None: - self._extraction_engine = value diff --git a/griptape/tasks/extraction_task.py b/griptape/tasks/extraction_task.py index d8f492693..c74c3ac49 100644 --- a/griptape/tasks/extraction_task.py +++ b/griptape/tasks/extraction_task.py @@ -13,12 +13,8 @@ @define class ExtractionTask(BaseTextInputTask): - _extraction_engine: BaseExtractionEngine = field(kw_only=True, default=None, 
alias="extraction_engine") - args: dict = field(kw_only=True) - - @property - def extraction_engine(self) -> BaseExtractionEngine: - return self._extraction_engine + extraction_engine: BaseExtractionEngine = field(kw_only=True) + args: dict = field(kw_only=True, factory=dict) def run(self) -> ListArtifact | ErrorArtifact: return self.extraction_engine.extract(self.input.to_text(), rulesets=self.all_rulesets, **self.args) diff --git a/griptape/tasks/image_query_task.py b/griptape/tasks/image_query_task.py index ea1b53739..1c77bbc0a 100644 --- a/griptape/tasks/image_query_task.py +++ b/griptape/tasks/image_query_task.py @@ -2,7 +2,7 @@ from typing import Callable -from attrs import define, field +from attrs import Factory, define, field from griptape.artifacts import ImageArtifact, ListArtifact, TextArtifact from griptape.engines import ImageQueryEngine @@ -24,7 +24,7 @@ class ImageQueryTask(BaseTask): image_query_engine: The engine used to execute the query. """ - _image_query_engine: ImageQueryEngine = field(default=None, kw_only=True, alias="image_query_engine") + image_query_engine: ImageQueryEngine = field(default=Factory(lambda: ImageQueryEngine()), kw_only=True) _input: ( tuple[str, list[ImageArtifact]] | tuple[TextArtifact, list[ImageArtifact]] @@ -62,19 +62,6 @@ def input( ) -> None: self._input = value - @property - def image_query_engine(self) -> ImageQueryEngine: - if self._image_query_engine is None: - if self.structure is not None: - self._image_query_engine = ImageQueryEngine(image_query_driver=self.structure.config.image_query_driver) - else: - raise ValueError("Image Query Engine is not set.") - return self._image_query_engine - - @image_query_engine.setter - def image_query_engine(self, value: ImageQueryEngine) -> None: - self._image_query_engine = value - def run(self) -> TextArtifact: query = self.input.value[0] diff --git a/griptape/tasks/inpainting_image_generation_task.py b/griptape/tasks/inpainting_image_generation_task.py index 2096c60e4..0ed28a11b 100644 --- a/griptape/tasks/inpainting_image_generation_task.py +++ b/griptape/tasks/inpainting_image_generation_task.py @@ -2,7 +2,7 @@ from typing import Callable -from attrs import define, field +from attrs import Factory, define, field from griptape.artifacts import ImageArtifact, ListArtifact, TextArtifact from griptape.engines import InpaintingImageGenerationEngine @@ -28,14 +28,13 @@ class InpaintingImageGenerationTask(BaseImageGenerationTask): output_file: If provided, the generated image will be written to disk as output_file. 
""" - _image_generation_engine: InpaintingImageGenerationEngine = field( - default=None, + image_generation_engine: InpaintingImageGenerationEngine = field( + default=Factory(lambda: InpaintingImageGenerationEngine()), kw_only=True, - alias="image_generation_engine", ) _input: ( tuple[str | TextArtifact, ImageArtifact, ImageArtifact] | Callable[[BaseTask], ListArtifact] | ListArtifact - ) = field(default=None) + ) = field(default=None, alias="input") @property def input(self) -> ListArtifact: @@ -60,21 +59,6 @@ def input( ) -> None: self._input = value - @property - def image_generation_engine(self) -> InpaintingImageGenerationEngine: - if self._image_generation_engine is None: - if self.structure is not None: - self._image_generation_engine = InpaintingImageGenerationEngine( - image_generation_driver=self.structure.config.image_generation_driver, - ) - else: - raise ValueError("Image Generation Engine is not set.") - return self._image_generation_engine - - @image_generation_engine.setter - def image_generation_engine(self, value: InpaintingImageGenerationEngine) -> None: - self._image_generation_engine = value - def run(self) -> ImageArtifact: prompt_artifact = self.input[0] diff --git a/griptape/tasks/json_extraction_task.py b/griptape/tasks/json_extraction_task.py deleted file mode 100644 index ce51b316f..000000000 --- a/griptape/tasks/json_extraction_task.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import annotations - -from attrs import define, field - -from griptape.engines import JsonExtractionEngine -from griptape.tasks import ExtractionTask - - -@define -class JsonExtractionTask(ExtractionTask): - _extraction_engine: JsonExtractionEngine = field(default=None, kw_only=True, alias="extraction_engine") - - @property - def extraction_engine(self) -> JsonExtractionEngine: - if self._extraction_engine is None: - if self.structure is not None: - self._extraction_engine = JsonExtractionEngine(prompt_driver=self.structure.config.prompt_driver) - else: - raise ValueError("Extraction Engine is not set.") - return self._extraction_engine - - @extraction_engine.setter - def extraction_engine(self, value: JsonExtractionEngine) -> None: - self._extraction_engine = value diff --git a/griptape/tasks/outpainting_image_generation_task.py b/griptape/tasks/outpainting_image_generation_task.py index a23fafd0f..6b63709db 100644 --- a/griptape/tasks/outpainting_image_generation_task.py +++ b/griptape/tasks/outpainting_image_generation_task.py @@ -2,7 +2,7 @@ from typing import Callable -from attrs import define, field +from attrs import Factory, define, field from griptape.artifacts import ImageArtifact, ListArtifact, TextArtifact from griptape.engines import OutpaintingImageGenerationEngine @@ -28,14 +28,13 @@ class OutpaintingImageGenerationTask(BaseImageGenerationTask): output_file: If provided, the generated image will be written to disk as output_file. 
""" - _image_generation_engine: OutpaintingImageGenerationEngine = field( - default=None, + image_generation_engine: OutpaintingImageGenerationEngine = field( + default=Factory(lambda: OutpaintingImageGenerationEngine()), kw_only=True, - alias="image_generation_engine", ) _input: ( tuple[str | TextArtifact, ImageArtifact, ImageArtifact] | Callable[[BaseTask], ListArtifact] | ListArtifact - ) = field(default=None) + ) = field(default=None, alias="input") @property def input(self) -> ListArtifact: @@ -60,22 +59,6 @@ def input( ) -> None: self._input = value - @property - def image_generation_engine(self) -> OutpaintingImageGenerationEngine: - if self._image_generation_engine is None: - if self.structure is not None: - self._image_generation_engine = OutpaintingImageGenerationEngine( - image_generation_driver=self.structure.config.image_generation_driver, - ) - else: - raise ValueError("Image Generation Engine is not set.") - - return self._image_generation_engine - - @image_generation_engine.setter - def image_generation_engine(self, value: OutpaintingImageGenerationEngine) -> None: - self._image_generation_engine = value - def run(self) -> ImageArtifact: prompt_artifact = self.input[0] diff --git a/griptape/tasks/prompt_image_generation_task.py b/griptape/tasks/prompt_image_generation_task.py index 66cffab3e..4d3356392 100644 --- a/griptape/tasks/prompt_image_generation_task.py +++ b/griptape/tasks/prompt_image_generation_task.py @@ -2,7 +2,7 @@ from typing import Callable -from attrs import define, field +from attrs import Factory, define, field from griptape.artifacts import ImageArtifact, TextArtifact from griptape.engines import PromptImageGenerationEngine @@ -29,11 +29,12 @@ class PromptImageGenerationTask(BaseImageGenerationTask): DEFAULT_INPUT_TEMPLATE = "{{ args[0] }}" - _input: str | TextArtifact | Callable[[BaseTask], TextArtifact] = field(default=DEFAULT_INPUT_TEMPLATE) - _image_generation_engine: PromptImageGenerationEngine = field( - default=None, + _input: str | TextArtifact | Callable[[BaseTask], TextArtifact] = field( + default=DEFAULT_INPUT_TEMPLATE, alias="input" + ) + image_generation_engine: PromptImageGenerationEngine = field( + default=Factory(lambda: PromptImageGenerationEngine()), kw_only=True, - alias="image_generation_engine", ) @property @@ -49,21 +50,6 @@ def input(self) -> TextArtifact: def input(self, value: TextArtifact) -> None: self._input = value - @property - def image_generation_engine(self) -> PromptImageGenerationEngine: - if self._image_generation_engine is None: - if self.structure is not None: - self._image_generation_engine = PromptImageGenerationEngine( - image_generation_driver=self.structure.config.image_generation_driver, - ) - else: - raise ValueError("Image Generation Engine is not set.") - return self._image_generation_engine - - @image_generation_engine.setter - def image_generation_engine(self, value: PromptImageGenerationEngine) -> None: - self._image_generation_engine = value - def run(self) -> ImageArtifact: image_artifact = self.image_generation_engine.run( prompts=[self.input.to_text()], diff --git a/griptape/tasks/prompt_task.py b/griptape/tasks/prompt_task.py index 386ebe239..17a73e4cd 100644 --- a/griptape/tasks/prompt_task.py +++ b/griptape/tasks/prompt_task.py @@ -1,11 +1,13 @@ from __future__ import annotations +import logging from typing import TYPE_CHECKING, Callable, Optional from attrs import Factory, define, field from griptape.artifacts import BaseArtifact, ListArtifact, TextArtifact from griptape.common import PromptStack 
+from griptape.configs import Defaults from griptape.mixins import RuleMixin from griptape.tasks import BaseTask from griptape.utils import J2 @@ -13,10 +15,14 @@ if TYPE_CHECKING: from griptape.drivers import BasePromptDriver +logger = logging.getLogger(Defaults.logging_config.logger_name) + @define class PromptTask(RuleMixin, BaseTask): - _prompt_driver: Optional[BasePromptDriver] = field(default=None, kw_only=True, alias="prompt_driver") + prompt_driver: BasePromptDriver = field( + default=Factory(lambda: Defaults.drivers_config.prompt_driver), kw_only=True + ) generate_system_template: Callable[[PromptTask], str] = field( default=Factory(lambda self: self.default_system_template_generator, takes_self=True), kw_only=True, @@ -56,15 +62,6 @@ def prompt_stack(self) -> PromptStack: return stack - @property - def prompt_driver(self) -> BasePromptDriver: - if self._prompt_driver is None: - if self.structure is not None: - self._prompt_driver = self.structure.config.prompt_driver - else: - raise ValueError("Prompt Driver is not set") - return self._prompt_driver - def default_system_template_generator(self, _: PromptTask) -> str: return J2("tasks/prompt_task/system.j2").render( rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.all_rulesets), @@ -73,12 +70,12 @@ def default_system_template_generator(self, _: PromptTask) -> str: def before_run(self) -> None: super().before_run() - self.structure.logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, self.input.to_text()) + logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, self.input.to_text()) def after_run(self) -> None: super().after_run() - self.structure.logger.info("%s %s\nOutput: %s", self.__class__.__name__, self.id, self.output.to_text()) + logger.info("%s %s\nOutput: %s", self.__class__.__name__, self.id, self.output.to_text()) def run(self) -> BaseArtifact: message = self.prompt_driver.run(self.prompt_stack) diff --git a/griptape/tasks/rag_task.py b/griptape/tasks/rag_task.py index 3f88f34d1..b7ea8d7c7 100644 --- a/griptape/tasks/rag_task.py +++ b/griptape/tasks/rag_task.py @@ -1,37 +1,20 @@ from __future__ import annotations -from typing import TYPE_CHECKING +from attrs import Factory, define, field -from attrs import define, field - -from griptape.artifacts import BaseArtifact, ErrorArtifact +from griptape.artifacts import BaseArtifact, ErrorArtifact, ListArtifact +from griptape.engines.rag import RagEngine from griptape.tasks import BaseTextInputTask -if TYPE_CHECKING: - from griptape.engines.rag import RagEngine - @define class RagTask(BaseTextInputTask): - _rag_engine: RagEngine = field(kw_only=True, default=None, alias="rag_engine") - - @property - def rag_engine(self) -> RagEngine: - if self._rag_engine is None: - if self.structure is not None: - self._rag_engine = self.structure.rag_engine - else: - raise ValueError("rag_engine is not set.") - return self._rag_engine - - @rag_engine.setter - def rag_engine(self, value: RagEngine) -> None: - self._rag_engine = value + rag_engine: RagEngine = field(kw_only=True, default=Factory(lambda: RagEngine())) def run(self) -> BaseArtifact: - result = self.rag_engine.process_query(self.input.to_text()).output + outputs = self.rag_engine.process_query(self.input.to_text()).outputs - if result is None: - return ErrorArtifact("empty output") + if len(outputs) > 0: + return ListArtifact(outputs) else: - return result + return ErrorArtifact("empty output") diff --git a/griptape/tasks/text_summary_task.py b/griptape/tasks/text_summary_task.py index 
5bd1b547e..dc1a7b8be 100644 --- a/griptape/tasks/text_summary_task.py +++ b/griptape/tasks/text_summary_task.py @@ -1,8 +1,8 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING -from attrs import define, field +from attrs import Factory, define, field from griptape.artifacts import TextArtifact from griptape.engines import PromptSummaryEngine @@ -14,20 +14,7 @@ @define class TextSummaryTask(BaseTextInputTask): - _summary_engine: Optional[BaseSummaryEngine] = field(default=None, alias="summary_engine") - - @property - def summary_engine(self) -> Optional[BaseSummaryEngine]: - if self._summary_engine is None: - if self.structure is not None: - self._summary_engine = PromptSummaryEngine(prompt_driver=self.structure.config.prompt_driver) - else: - raise ValueError("Summary Engine is not set.") - return self._summary_engine - - @summary_engine.setter - def summary_engine(self, value: BaseSummaryEngine) -> None: - self._summary_engine = value + summary_engine: BaseSummaryEngine = field(default=Factory(lambda: PromptSummaryEngine()), kw_only=True) def run(self) -> TextArtifact: return TextArtifact(self.summary_engine.summarize_text(self.input.to_text(), rulesets=self.all_rulesets)) diff --git a/griptape/tasks/text_to_speech_task.py b/griptape/tasks/text_to_speech_task.py index 3ca503dfe..680a67603 100644 --- a/griptape/tasks/text_to_speech_task.py +++ b/griptape/tasks/text_to_speech_task.py @@ -2,7 +2,7 @@ from typing import TYPE_CHECKING, Callable -from attrs import define, field +from attrs import Factory, define, field from griptape.artifacts import TextArtifact from griptape.engines import TextToSpeechEngine @@ -19,7 +19,7 @@ class TextToSpeechTask(BaseAudioGenerationTask): DEFAULT_INPUT_TEMPLATE = "{{ args[0] }}" _input: str | TextArtifact | Callable[[BaseTask], TextArtifact] = field(default=DEFAULT_INPUT_TEMPLATE) - _text_to_speech_engine: TextToSpeechEngine = field(default=None, kw_only=True, alias="text_to_speech_engine") + text_to_speech_engine: TextToSpeechEngine = field(default=Factory(lambda: TextToSpeechEngine()), kw_only=True) @property def input(self) -> TextArtifact: @@ -34,21 +34,6 @@ def input(self) -> TextArtifact: def input(self, value: TextArtifact) -> None: self._input = value - @property - def text_to_speech_engine(self) -> TextToSpeechEngine: - if self._text_to_speech_engine is None: - if self.structure is not None: - self._text_to_speech_engine = TextToSpeechEngine( - text_to_speech_driver=self.structure.config.text_to_speech_driver, - ) - else: - raise ValueError("Audio Generation Engine is not set.") - return self._text_to_speech_engine - - @text_to_speech_engine.setter - def text_to_speech_engine(self, value: TextToSpeechEngine) -> None: - self._text_to_speech_engine = value - def run(self) -> AudioArtifact: audio_artifact = self.text_to_speech_engine.run(prompts=[self.input.to_text()], rulesets=self.all_rulesets) diff --git a/griptape/tasks/tool_task.py b/griptape/tasks/tool_task.py index ca548b34d..6dd5000b3 100644 --- a/griptape/tasks/tool_task.py +++ b/griptape/tasks/tool_task.py @@ -37,6 +37,7 @@ def prompt_stack(self) -> PromptStack: return stack def __attrs_post_init__(self) -> None: + super().__attrs_post_init__() if self.task_memory is not None: self.set_default_tools_memory(self.task_memory) diff --git a/griptape/tasks/toolkit_task.py b/griptape/tasks/toolkit_task.py index cb6058141..24607a352 100644 --- a/griptape/tasks/toolkit_task.py +++ b/griptape/tasks/toolkit_task.py @@ -42,6 +42,7 @@ class 
ToolkitTask(PromptTask, ActionsSubtaskOriginMixin): response_stop_sequence: str = field(default=RESPONSE_STOP_SEQUENCE, kw_only=True) def __attrs_post_init__(self) -> None: + super().__attrs_post_init__() if self.task_memory: self.set_default_tools_memory(self.task_memory) @@ -204,6 +205,7 @@ def find_subtask(self, subtask_id: str) -> ActionsSubtask: def add_subtask(self, subtask: ActionsSubtask) -> ActionsSubtask: subtask.attach_to(self) + subtask.structure = self.structure if len(self.subtasks) > 0: self.subtasks[-1].add_child(subtask) diff --git a/griptape/tasks/variation_image_generation_task.py b/griptape/tasks/variation_image_generation_task.py index df4579efa..e3feaeac5 100644 --- a/griptape/tasks/variation_image_generation_task.py +++ b/griptape/tasks/variation_image_generation_task.py @@ -2,7 +2,7 @@ from typing import Callable -from attrs import define, field +from attrs import Factory, define, field from griptape.artifacts import ImageArtifact, ListArtifact, TextArtifact from griptape.engines import VariationImageGenerationEngine @@ -28,13 +28,12 @@ class VariationImageGenerationTask(BaseImageGenerationTask): output_file: If provided, the generated image will be written to disk as output_file. """ - _image_generation_engine: VariationImageGenerationEngine = field( - default=None, + image_generation_engine: VariationImageGenerationEngine = field( + default=Factory(lambda: VariationImageGenerationEngine()), kw_only=True, - alias="image_generation_engine", ) _input: tuple[str | TextArtifact, ImageArtifact] | Callable[[BaseTask], ListArtifact] | ListArtifact = field( - default=None, + default=None, alias="input" ) @property @@ -57,21 +56,6 @@ def input(self) -> ListArtifact: def input(self, value: tuple[str | TextArtifact, ImageArtifact] | Callable[[BaseTask], ListArtifact]) -> None: self._input = value - @property - def image_generation_engine(self) -> VariationImageGenerationEngine: - if self._image_generation_engine is None: - if self.structure is not None: - self._image_generation_engine = VariationImageGenerationEngine( - image_generation_driver=self.structure.config.image_generation_driver, - ) - else: - raise ValueError("Image Generation Engine is not set.") - return self._image_generation_engine - - @image_generation_engine.setter - def image_generation_engine(self, value: VariationImageGenerationEngine) -> None: - self._image_generation_engine = value - def run(self) -> ImageArtifact: prompt_artifact = self.input[0] diff --git a/griptape/templates/engines/extraction/csv/system.j2 b/griptape/templates/engines/extraction/csv/system.j2 new file mode 100644 index 000000000..7c5776257 --- /dev/null +++ b/griptape/templates/engines/extraction/csv/system.j2 @@ -0,0 +1,7 @@ +Don't add the header row. Don't use markdown formatting for output. Fields containing line breaks (CRLF), double quotes, and commas should be enclosed in double-quotes. +Column Names: """{{ column_names }}""" + +{% if rulesets %} + +{{ rulesets }} +{% endif %} diff --git a/griptape/templates/engines/extraction/csv/user.j2 b/griptape/templates/engines/extraction/csv/user.j2 new file mode 100644 index 000000000..0f33dadc3 --- /dev/null +++ b/griptape/templates/engines/extraction/csv/user.j2 @@ -0,0 +1,4 @@ +Extract information from the Text based on the Column Names and output it as a CSV file. 
+Text: """{{ text }}""" + +Answer: diff --git a/griptape/templates/engines/extraction/csv_extraction.j2 b/griptape/templates/engines/extraction/csv_extraction.j2 deleted file mode 100644 index 6f9da346b..000000000 --- a/griptape/templates/engines/extraction/csv_extraction.j2 +++ /dev/null @@ -1,11 +0,0 @@ -Text: """{{ text }}""" - -Column Names: """{{ column_names }}""" - -Extract information from the Text based on the Column Names and output it as a CSV file. Don't add the header row. Don't use markdown formatting for output. Fields containing line breaks (CRLF), double quotes, and commas should be enclosed in double-quotes. -{% if rulesets %} - -{{ rulesets }} -{% endif %} - -Answer: diff --git a/griptape/templates/engines/extraction/json/system.j2 b/griptape/templates/engines/extraction/json/system.j2 new file mode 100644 index 000000000..987ff19a9 --- /dev/null +++ b/griptape/templates/engines/extraction/json/system.j2 @@ -0,0 +1,6 @@ +Extraction Template JSON Schema: """{{ json_template_schema }}""" + +{% if rulesets %} + +{{ rulesets }} +{% endif %} diff --git a/griptape/templates/engines/extraction/json_extraction.j2 b/griptape/templates/engines/extraction/json/user.j2 similarity index 56% rename from griptape/templates/engines/extraction/json_extraction.j2 rename to griptape/templates/engines/extraction/json/user.j2 index 85d95bef9..984977d9a 100644 --- a/griptape/templates/engines/extraction/json_extraction.j2 +++ b/griptape/templates/engines/extraction/json/user.j2 @@ -1,11 +1,4 @@ -Text: """{{ text }}""" - -Extraction Template JSON Schema: """{{ json_template_schema }}""" - Extract information from the Text based on the Extraction Template JSON Schema into an array of JSON objects. -{% if rulesets %} - -{{ rulesets }} -{% endif %} +Text: """{{ text }}""" JSON array: diff --git a/griptape/templates/engines/rag/modules/query/translate/user.j2 b/griptape/templates/engines/rag/modules/query/translate/user.j2 new file mode 100644 index 000000000..d18252c1f --- /dev/null +++ b/griptape/templates/engines/rag/modules/query/translate/user.j2 @@ -0,0 +1,3 @@ +Translate the following text into {{ language }}. Only output translated text. + +Text: {{ query }} \ No newline at end of file diff --git a/griptape/templates/engines/rag/modules/response/prompt/system.j2 b/griptape/templates/engines/rag/modules/response/prompt/system.j2 index 1fa9d8c12..38b0297d5 100644 --- a/griptape/templates/engines/rag/modules/response/prompt/system.j2 +++ b/griptape/templates/engines/rag/modules/response/prompt/system.j2 @@ -1,6 +1,10 @@ You are an expert Q&A system. Always answer the question using the provided context information, and not prior knowledge. Always be truthful. Don't make up facts. You can answer questions by searching through text chunks. -{% if before_system_prompt %} -{{ before_system_prompt }} +{% if rulesets %} +{{ rulesets }} + +{% endif %} +{% if metadata %} +{{ metadata }} {% endif %} Use the following list of text chunks to respond. If there are no text chunks available or text chunks don't have relevant information respond with "I could not find an answer." 
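The extraction changes above (ExtractionTask now taking an explicit extraction_engine with args defaulting to an empty dict, plus the new csv/json system and user templates) remove the old structure-config fallback, so callers construct the engine themselves. A minimal usage sketch under the new signatures; the CsvExtractionEngine column_names argument and the Pipeline wiring are illustrative assumptions, not code from this patch:

    from griptape.engines import CsvExtractionEngine
    from griptape.structures import Pipeline
    from griptape.tasks import ExtractionTask

    # The engine is passed in explicitly instead of being resolved from structure.config.
    csv_task = ExtractionTask(
        extraction_engine=CsvExtractionEngine(column_names=["name", "age"]),  # assumed engine fields
    )

    pipeline = Pipeline()
    pipeline.add_task(csv_task)
    pipeline.run("Alice is 30 years old and Bob is 25.")  # the task input defaults to args[0]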
diff --git a/griptape/tokenizers/amazon_bedrock_tokenizer.py b/griptape/tokenizers/amazon_bedrock_tokenizer.py index 292dcde17..bd758c554 100644 --- a/griptape/tokenizers/amazon_bedrock_tokenizer.py +++ b/griptape/tokenizers/amazon_bedrock_tokenizer.py @@ -37,6 +37,4 @@ class AmazonBedrockTokenizer(BaseTokenizer): characters_per_token: int = field(default=4, kw_only=True) def count_tokens(self, text: str) -> int: - num_tokens = (len(text) + self.characters_per_token - 1) // self.characters_per_token - - return num_tokens + return (len(text) + self.characters_per_token - 1) // self.characters_per_token diff --git a/griptape/tokenizers/simple_tokenizer.py b/griptape/tokenizers/simple_tokenizer.py index 214e5be2d..97053acb2 100644 --- a/griptape/tokenizers/simple_tokenizer.py +++ b/griptape/tokenizers/simple_tokenizer.py @@ -11,6 +11,4 @@ class SimpleTokenizer(BaseTokenizer): characters_per_token: int = field(kw_only=True) def count_tokens(self, text: str) -> int: - num_tokens = (len(text) + self.characters_per_token - 1) // self.characters_per_token - - return num_tokens + return (len(text) + self.characters_per_token - 1) // self.characters_per_token diff --git a/griptape/tools/__init__.py b/griptape/tools/__init__.py index d99b63b6c..ce59088f3 100644 --- a/griptape/tools/__init__.py +++ b/griptape/tools/__init__.py @@ -1,67 +1,71 @@ from .base_tool import BaseTool -from .base_image_generation_client import BaseImageGenerationClient -from .calculator.tool import Calculator -from .web_search.tool import WebSearch -from .web_scraper.tool import WebScraper -from .sql_client.tool import SqlClient -from .email_client.tool import EmailClient -from .rest_api_client.tool import RestApiClient -from .file_manager.tool import FileManager -from .vector_store_client.tool import VectorStoreClient -from .date_time.tool import DateTime -from .task_memory_client.tool import TaskMemoryClient -from .base_aws_client import BaseAwsClient -from .aws_iam_client.tool import AwsIamClient -from .aws_s3_client.tool import AwsS3Client -from .computer.tool import Computer -from .base_google_client import BaseGoogleClient -from .google_gmail.tool import GoogleGmailClient -from .google_cal.tool import GoogleCalendarClient -from .google_docs.tool import GoogleDocsClient -from .google_drive.tool import GoogleDriveClient -from .openweather_client.tool import OpenWeatherClient -from .prompt_image_generation_client.tool import PromptImageGenerationClient -from .variation_image_generation_client.tool import VariationImageGenerationClient -from .inpainting_image_generation_client.tool import InpaintingImageGenerationClient -from .outpainting_image_generation_client.tool import OutpaintingImageGenerationClient -from .griptape_cloud_knowledge_base_client.tool import GriptapeCloudKnowledgeBaseClient -from .structure_run_client.tool import StructureRunClient -from .image_query_client.tool import ImageQueryClient -from .rag_client.tool import RagClient -from .text_to_speech_client.tool import TextToSpeechClient -from .audio_transcription_client.tool import AudioTranscriptionClient +from .base_image_generation_tool import BaseImageGenerationTool +from .calculator.tool import CalculatorTool +from .web_search.tool import WebSearchTool +from .web_scraper.tool import WebScraperTool +from .sql.tool import SqlTool +from .email.tool import EmailTool +from .rest_api.tool import RestApiTool +from .file_manager.tool import FileManagerTool +from .vector_store.tool import VectorStoreTool +from .date_time.tool import DateTimeTool +from 
.base_aws_tool import BaseAwsTool +from .aws_iam.tool import AwsIamTool +from .aws_s3.tool import AwsS3Tool +from .computer.tool import ComputerTool +from .base_google_tool import BaseGoogleTool +from .google_gmail.tool import GoogleGmailTool +from .google_calendar.tool import GoogleCalendarTool +from .google_docs.tool import GoogleDocsTool +from .google_drive.tool import GoogleDriveTool +from .openweather.tool import OpenWeatherTool +from .prompt_image_generation.tool import PromptImageGenerationTool +from .variation_image_generation.tool import VariationImageGenerationTool +from .inpainting_image_generation.tool import InpaintingImageGenerationTool +from .outpainting_image_generation.tool import OutpaintingImageGenerationTool +from .griptape_cloud_knowledge_base.tool import GriptapeCloudKnowledgeBaseTool +from .structure_run.tool import StructureRunTool +from .image_query.tool import ImageQueryTool +from .rag.tool import RagTool +from .text_to_speech.tool import TextToSpeechTool +from .audio_transcription.tool import AudioTranscriptionTool +from .extraction.tool import ExtractionTool +from .prompt_summary.tool import PromptSummaryTool +from .query.tool import QueryTool __all__ = [ "BaseTool", - "BaseImageGenerationClient", - "BaseAwsClient", - "AwsIamClient", - "AwsS3Client", - "BaseGoogleClient", - "GoogleGmailClient", - "GoogleDocsClient", - "GoogleCalendarClient", - "GoogleDriveClient", - "Calculator", - "WebSearch", - "WebScraper", - "SqlClient", - "EmailClient", - "RestApiClient", - "FileManager", - "VectorStoreClient", - "DateTime", - "TaskMemoryClient", - "Computer", - "OpenWeatherClient", - "PromptImageGenerationClient", - "VariationImageGenerationClient", - "InpaintingImageGenerationClient", - "OutpaintingImageGenerationClient", - "GriptapeCloudKnowledgeBaseClient", - "StructureRunClient", - "ImageQueryClient", - "RagClient", - "TextToSpeechClient", - "AudioTranscriptionClient", + "BaseImageGenerationTool", + "BaseAwsTool", + "AwsIamTool", + "AwsS3Tool", + "BaseGoogleTool", + "GoogleGmailTool", + "GoogleDocsTool", + "GoogleCalendarTool", + "GoogleDriveTool", + "CalculatorTool", + "WebSearchTool", + "WebScraperTool", + "SqlTool", + "EmailTool", + "RestApiTool", + "FileManagerTool", + "VectorStoreTool", + "DateTimeTool", + "ComputerTool", + "OpenWeatherTool", + "PromptImageGenerationTool", + "VariationImageGenerationTool", + "InpaintingImageGenerationTool", + "OutpaintingImageGenerationTool", + "GriptapeCloudKnowledgeBaseTool", + "StructureRunTool", + "ImageQueryTool", + "RagTool", + "TextToSpeechTool", + "AudioTranscriptionTool", + "ExtractionTool", + "PromptSummaryTool", + "QueryTool", ] diff --git a/griptape/tools/image_query_client/__init__.py b/griptape/tools/audio_transcription/__init__.py similarity index 100% rename from griptape/tools/image_query_client/__init__.py rename to griptape/tools/audio_transcription/__init__.py diff --git a/griptape/tools/audio_transcription_client/manifest.yml b/griptape/tools/audio_transcription/manifest.yml similarity index 84% rename from griptape/tools/audio_transcription_client/manifest.yml rename to griptape/tools/audio_transcription/manifest.yml index 6bbe4a21a..32b017c55 100644 --- a/griptape/tools/audio_transcription_client/manifest.yml +++ b/griptape/tools/audio_transcription/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Transcription Client +name: Transcription Tool description: A tool for generating transcription of audio. 
contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/audio_transcription_client/tool.py b/griptape/tools/audio_transcription/tool.py similarity index 98% rename from griptape/tools/audio_transcription_client/tool.py rename to griptape/tools/audio_transcription/tool.py index 62cd9e7a5..4174db209 100644 --- a/griptape/tools/audio_transcription_client/tool.py +++ b/griptape/tools/audio_transcription/tool.py @@ -17,7 +17,7 @@ @define -class AudioTranscriptionClient(BaseTool): +class AudioTranscriptionTool(BaseTool): """A tool that can be used to generate transcriptions from input audio.""" engine: AudioTranscriptionEngine = field(kw_only=True) diff --git a/griptape/tools/inpainting_image_generation_client/__init__.py b/griptape/tools/aws_iam/__init__.py similarity index 100% rename from griptape/tools/inpainting_image_generation_client/__init__.py rename to griptape/tools/aws_iam/__init__.py diff --git a/griptape/tools/aws_iam_client/manifest.yml b/griptape/tools/aws_iam/manifest.yml similarity index 86% rename from griptape/tools/aws_iam_client/manifest.yml rename to griptape/tools/aws_iam/manifest.yml index ea825527f..072d4f92e 100644 --- a/griptape/tools/aws_iam_client/manifest.yml +++ b/griptape/tools/aws_iam/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: AWS IAM Client +name: AWS IAM Tool description: Tool for the IAM boto3 API. contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/aws_iam_client/tool.py b/griptape/tools/aws_iam/tool.py similarity index 97% rename from griptape/tools/aws_iam_client/tool.py rename to griptape/tools/aws_iam/tool.py index 1be0251f0..8d22dd3c9 100644 --- a/griptape/tools/aws_iam_client/tool.py +++ b/griptape/tools/aws_iam/tool.py @@ -6,7 +6,7 @@ from schema import Literal, Schema from griptape.artifacts import ErrorArtifact, ListArtifact, TextArtifact -from griptape.tools import BaseAwsClient +from griptape.tools import BaseAwsTool from griptape.utils.decorators import activity if TYPE_CHECKING: @@ -14,7 +14,7 @@ @define -class AwsIamClient(BaseAwsClient): +class AwsIamTool(BaseAwsTool): iam_client: Client = field(default=Factory(lambda self: self.session.client("iam"), takes_self=True), kw_only=True) @activity( diff --git a/griptape/tools/openweather_client/__init__.py b/griptape/tools/aws_s3/__init__.py similarity index 100% rename from griptape/tools/openweather_client/__init__.py rename to griptape/tools/aws_s3/__init__.py diff --git a/griptape/tools/aws_s3_client/manifest.yml b/griptape/tools/aws_s3/manifest.yml similarity index 86% rename from griptape/tools/aws_s3_client/manifest.yml rename to griptape/tools/aws_s3/manifest.yml index 642b6c588..a48169f0c 100644 --- a/griptape/tools/aws_s3_client/manifest.yml +++ b/griptape/tools/aws_s3/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: AWS S3 Client +name: AWS S3 Tool description: Tool for the S3 boto3 API. 
contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/aws_s3_client/tool.py b/griptape/tools/aws_s3/tool.py similarity index 99% rename from griptape/tools/aws_s3_client/tool.py rename to griptape/tools/aws_s3/tool.py index 8f67195a1..24d091d71 100644 --- a/griptape/tools/aws_s3_client/tool.py +++ b/griptape/tools/aws_s3/tool.py @@ -7,7 +7,7 @@ from schema import Literal, Schema from griptape.artifacts import BlobArtifact, ErrorArtifact, InfoArtifact, ListArtifact, TextArtifact -from griptape.tools import BaseAwsClient +from griptape.tools import BaseAwsTool from griptape.utils.decorators import activity if TYPE_CHECKING: @@ -15,7 +15,7 @@ @define -class AwsS3Client(BaseAwsClient): +class AwsS3Tool(BaseAwsTool): s3_client: Client = field(default=Factory(lambda self: self.session.client("s3"), takes_self=True), kw_only=True) @activity( diff --git a/griptape/tools/base_aws_client.py b/griptape/tools/base_aws_tool.py similarity index 95% rename from griptape/tools/base_aws_client.py rename to griptape/tools/base_aws_tool.py index 8c6d02e2b..72fc54583 100644 --- a/griptape/tools/base_aws_client.py +++ b/griptape/tools/base_aws_tool.py @@ -14,7 +14,7 @@ @define -class BaseAwsClient(BaseTool, ABC): +class BaseAwsTool(BaseTool, ABC): session: boto3.Session = field(kw_only=True) @activity(config={"description": "Can be used to get current AWS account and IAM principal."}) diff --git a/griptape/tools/base_google_client.py b/griptape/tools/base_google_tool.py similarity index 98% rename from griptape/tools/base_google_client.py rename to griptape/tools/base_google_tool.py index 2a38d8ffe..c40a583cf 100644 --- a/griptape/tools/base_google_client.py +++ b/griptape/tools/base_google_tool.py @@ -9,7 +9,7 @@ @define -class BaseGoogleClient(BaseTool, ABC): +class BaseGoogleTool(BaseTool, ABC): DRIVE_FILE_SCOPES = ["https://www.googleapis.com/auth/drive.file"] DRIVE_AUTH_SCOPES = ["https://www.googleapis.com/auth/drive"] diff --git a/griptape/tools/base_griptape_cloud_client.py b/griptape/tools/base_griptape_cloud_tool.py similarity index 93% rename from griptape/tools/base_griptape_cloud_client.py rename to griptape/tools/base_griptape_cloud_tool.py index 4f5692957..7ee8f2dfc 100644 --- a/griptape/tools/base_griptape_cloud_client.py +++ b/griptape/tools/base_griptape_cloud_tool.py @@ -8,7 +8,7 @@ @define -class BaseGriptapeCloudClient(BaseTool, ABC): +class BaseGriptapeCloudTool(BaseTool, ABC): """Base class for Griptape Cloud clients. Attributes: diff --git a/griptape/tools/base_image_generation_client.py b/griptape/tools/base_image_generation_tool.py similarity index 88% rename from griptape/tools/base_image_generation_client.py rename to griptape/tools/base_image_generation_tool.py index e85336d23..487c6d1ba 100644 --- a/griptape/tools/base_image_generation_client.py +++ b/griptape/tools/base_image_generation_tool.py @@ -5,7 +5,7 @@ @define -class BaseImageGenerationClient(BlobArtifactFileOutputMixin, BaseTool): +class BaseImageGenerationTool(BlobArtifactFileOutputMixin, BaseTool): """A base class for tools that generate images from text prompts.""" PROMPT_DESCRIPTION = "Features and qualities to include in the generated image, descriptive and succinct." 
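The tools/__init__.py rewrite above renames every public tool from the *Client suffix to *Tool (and several packages, e.g. sql_client to sql). A minimal sketch of what downstream imports look like after the rename; the Agent setup and prompt are illustrative assumptions rather than code taken from this patch:

    from griptape.structures import Agent
    from griptape.tools import CalculatorTool, WebScraperTool  # formerly Calculator, WebScraper

    # Behavior is unchanged; only the class names and import paths moved.
    agent = Agent(tools=[CalculatorTool(), WebScraperTool(off_prompt=False)])
    agent.run("Scrape https://example.com and compute 12 * 34.")

Code that imports the old *Client names needs the one-for-one rename reflected in the updated __all__ list.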
diff --git a/griptape/tools/base_tool.py b/griptape/tools/base_tool.py index bfb754bc0..7c6785649 100644 --- a/griptape/tools/base_tool.py +++ b/griptape/tools/base_tool.py @@ -7,8 +7,9 @@ import subprocess import sys from abc import ABC -from typing import TYPE_CHECKING, Callable, Optional +from typing import TYPE_CHECKING, Any, Callable, Optional +import schema import yaml from attrs import Attribute, Factory, define, field from schema import Literal, Or, Schema @@ -95,18 +96,25 @@ def schema(self) -> dict: return full_schema.json_schema(f"{self.name} ToolAction Schema") def activity_schemas(self) -> list[Schema]: - return [ - Schema( - { - Literal("name"): self.name, - Literal("path", description=self.activity_description(activity)): self.activity_name(activity), - **self.activity_to_input( - activity, - ), # Unpack the dictionary in order to only add the key-values if there are any - }, - ) - for activity in self.activities() - ] + schemas = [] + + for activity in self.activities(): + schema_dict: dict[Literal | schema.Optional, Any] = { + Literal("name"): self.name, + Literal("path", description=self.activity_description(activity)): self.activity_name(activity), + } + + activity_schema = self.activity_schema(activity) + # If no schema is defined, we just make `input` optional instead of omitting it. + # This works better with lower-end models that may accidentally pass in an empty dict. + if activity_schema is None: + schema_dict[schema.Optional("input")] = {} + else: + schema_dict[Literal("input")] = activity_schema.schema + + schemas.append(Schema(schema_dict)) + + return schemas def execute(self, activity: Callable, subtask: ActionsSubtask, action: ToolAction) -> BaseArtifact: try: diff --git a/griptape/tools/calculator/manifest.yml b/griptape/tools/calculator/manifest.yml index 717313495..dd902c616 100644 --- a/griptape/tools/calculator/manifest.yml +++ b/griptape/tools/calculator/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Calculator +name: Calculator Tool description: Tool for making simple calculations in Python. 
contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file +legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/calculator/tool.py b/griptape/tools/calculator/tool.py index e8fcb1ed4..ed128987f 100644 --- a/griptape/tools/calculator/tool.py +++ b/griptape/tools/calculator/tool.py @@ -5,7 +5,7 @@ from griptape.utils.decorators import activity -class Calculator(BaseTool): +class CalculatorTool(BaseTool): @activity( config={ "description": "Can be used for computing simple numerical or algebraic calculations in Python", diff --git a/griptape/tools/computer/manifest.yml b/griptape/tools/computer/manifest.yml index 706c32b5b..4c5d30495 100644 --- a/griptape/tools/computer/manifest.yml +++ b/griptape/tools/computer/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Computer +name: Computer Tool description: Tool that allows LLMs to run Python code and access the shell contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file +legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/computer/tool.py b/griptape/tools/computer/tool.py index 4e996c63c..e2da2d9f8 100644 --- a/griptape/tools/computer/tool.py +++ b/griptape/tools/computer/tool.py @@ -23,7 +23,7 @@ @define -class Computer(BaseTool): +class ComputerTool(BaseTool): local_workdir: Optional[str] = field(default=None, kw_only=True) container_workdir: str = field(default="/griptape", kw_only=True) env_vars: dict = field(factory=dict, kw_only=True) diff --git a/griptape/tools/date_time/manifest.yml b/griptape/tools/date_time/manifest.yml index c50b46ed8..da8e553a5 100644 --- a/griptape/tools/date_time/manifest.yml +++ b/griptape/tools/date_time/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Date Time +name: Date Time Tool description: Tool that allows LLMs to retrieve the current date & time contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file +legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/date_time/tool.py b/griptape/tools/date_time/tool.py index 728a3449a..5181dbe3e 100644 --- a/griptape/tools/date_time/tool.py +++ b/griptape/tools/date_time/tool.py @@ -7,7 +7,7 @@ from griptape.utils.decorators import activity -class DateTime(BaseTool): +class DateTimeTool(BaseTool): @activity(config={"description": "Can be used to return current date and time."}) def get_current_datetime(self, _: dict) -> BaseArtifact: try: diff --git a/griptape/tools/outpainting_image_generation_client/__init__.py b/griptape/tools/email/__init__.py similarity index 100% rename from griptape/tools/outpainting_image_generation_client/__init__.py rename to griptape/tools/email/__init__.py diff --git a/griptape/tools/email_client/manifest.yml b/griptape/tools/email/manifest.yml similarity index 87% rename from griptape/tools/email_client/manifest.yml rename to griptape/tools/email/manifest.yml index c1e04b226..08009292d 100644 --- a/griptape/tools/email_client/manifest.yml +++ b/griptape/tools/email/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Email Client +name: Email Tool description: Tool for working with email. 
contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/email_client/tool.py b/griptape/tools/email/tool.py similarity index 99% rename from griptape/tools/email_client/tool.py rename to griptape/tools/email/tool.py index 26c13bb9f..f5e7f0247 100644 --- a/griptape/tools/email_client/tool.py +++ b/griptape/tools/email/tool.py @@ -16,7 +16,7 @@ @define -class EmailClient(BaseTool): +class EmailTool(BaseTool): """Tool for working with email. Attributes: diff --git a/griptape/tools/prompt_image_generation_client/__init__.py b/griptape/tools/extraction/__init__.py similarity index 100% rename from griptape/tools/prompt_image_generation_client/__init__.py rename to griptape/tools/extraction/__init__.py diff --git a/griptape/tools/extraction/manifest.yml b/griptape/tools/extraction/manifest.yml new file mode 100644 index 000000000..9c489d9f6 --- /dev/null +++ b/griptape/tools/extraction/manifest.yml @@ -0,0 +1,5 @@ +version: "v1" +name: Extraction Client +description: Tool for performing structured extractions on unstructured data. +contact_email: hello@griptape.ai +legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/inpainting_image_generation_client/requirements.txt b/griptape/tools/extraction/requirements.txt similarity index 100% rename from griptape/tools/inpainting_image_generation_client/requirements.txt rename to griptape/tools/extraction/requirements.txt diff --git a/griptape/tools/extraction/tool.py b/griptape/tools/extraction/tool.py new file mode 100644 index 000000000..1f6d06b80 --- /dev/null +++ b/griptape/tools/extraction/tool.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from attrs import define, field +from schema import Literal, Or, Schema + +from griptape.artifacts import ErrorArtifact, ListArtifact, TextArtifact +from griptape.mixins import RuleMixin +from griptape.tools import BaseTool +from griptape.utils.decorators import activity + +if TYPE_CHECKING: + from griptape.artifacts import InfoArtifact + from griptape.engines import BaseExtractionEngine + + +@define(kw_only=True) +class ExtractionTool(BaseTool, RuleMixin): + """Tool for using an Extraction Engine. + + Attributes: + extraction_engine: `ExtractionEngine`. + """ + + extraction_engine: BaseExtractionEngine = field() + + @activity( + config={ + "description": "Can be used extract structured text from data.", + "schema": Schema( + { + Literal("data"): Or( + str, + Schema( + { + "memory_name": str, + "artifact_namespace": str, + } + ), + ), + } + ), + }, + ) + def extract(self, params: dict) -> ListArtifact | InfoArtifact | ErrorArtifact: + data = params["values"]["data"] + + if isinstance(data, str): + artifacts = ListArtifact([TextArtifact(data)]) + else: + memory = self.find_input_memory(data["memory_name"]) + artifact_namespace = data["artifact_namespace"] + + if memory is not None: + artifacts = memory.load_artifacts(artifact_namespace) + else: + return ErrorArtifact("memory not found") + + return self.extraction_engine.extract(artifacts) diff --git a/griptape/tools/file_manager/manifest.yml b/griptape/tools/file_manager/manifest.yml index 8778098fb..132a03327 100644 --- a/griptape/tools/file_manager/manifest.yml +++ b/griptape/tools/file_manager/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: File Manager +name: File Manager Tool description: Tool for managing files in the local environment. 
contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file +legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/file_manager/tool.py b/griptape/tools/file_manager/tool.py index c0cb691ea..ece6a0e92 100644 --- a/griptape/tools/file_manager/tool.py +++ b/griptape/tools/file_manager/tool.py @@ -12,8 +12,8 @@ @define -class FileManager(BaseTool): - """FileManager is a tool that can be used to list, load, and save files. +class FileManagerTool(BaseTool): + """FileManagerTool is a tool that can be used to list, load, and save files. Attributes: file_manager_driver: File Manager Driver to use to list, load, and save files. @@ -41,7 +41,7 @@ def list_files_from_disk(self, params: dict) -> TextArtifact | ErrorArtifact: Literal( "paths", description="Relative paths to files to be loaded in the POSIX format. For example, ['foo/bar/file.txt']", - ): list[str], + ): Schema([str]), }, ), }, diff --git a/griptape/tools/rag_client/__init__.py b/griptape/tools/google_calendar/__init__.py similarity index 100% rename from griptape/tools/rag_client/__init__.py rename to griptape/tools/google_calendar/__init__.py diff --git a/griptape/tools/google_cal/manifest.yml b/griptape/tools/google_calendar/manifest.yml similarity index 100% rename from griptape/tools/google_cal/manifest.yml rename to griptape/tools/google_calendar/manifest.yml diff --git a/griptape/tools/google_cal/requirements.txt b/griptape/tools/google_calendar/requirements.txt similarity index 100% rename from griptape/tools/google_cal/requirements.txt rename to griptape/tools/google_calendar/requirements.txt diff --git a/griptape/tools/google_cal/tool.py b/griptape/tools/google_calendar/tool.py similarity index 98% rename from griptape/tools/google_cal/tool.py rename to griptape/tools/google_calendar/tool.py index 70f685605..de9c4e8e1 100644 --- a/griptape/tools/google_cal/tool.py +++ b/griptape/tools/google_calendar/tool.py @@ -7,12 +7,12 @@ from schema import Literal, Optional, Schema from griptape.artifacts import ErrorArtifact, InfoArtifact, ListArtifact, TextArtifact -from griptape.tools import BaseGoogleClient +from griptape.tools import BaseGoogleTool from griptape.utils.decorators import activity @define -class GoogleCalendarClient(BaseGoogleClient): +class GoogleCalendarTool(BaseGoogleTool): CREATE_EVENT_SCOPES = ["https://www.googleapis.com/auth/calendar"] GET_UPCOMING_EVENTS_SCOPES = ["https://www.googleapis.com/auth/calendar"] diff --git a/griptape/tools/google_docs/tool.py b/griptape/tools/google_docs/tool.py index b3564b9b2..be40b09da 100644 --- a/griptape/tools/google_docs/tool.py +++ b/griptape/tools/google_docs/tool.py @@ -6,12 +6,12 @@ from schema import Literal, Optional, Schema from griptape.artifacts import ErrorArtifact, InfoArtifact -from griptape.tools import BaseGoogleClient +from griptape.tools import BaseGoogleTool from griptape.utils.decorators import activity @define -class GoogleDocsClient(BaseGoogleClient): +class GoogleDocsTool(BaseGoogleTool): DOCS_SCOPES = ["https://www.googleapis.com/auth/documents"] DEFAULT_FOLDER_PATH = "root" diff --git a/griptape/tools/google_drive/tool.py b/griptape/tools/google_drive/tool.py index 37122a56b..1642ebaf7 100644 --- a/griptape/tools/google_drive/tool.py +++ b/griptape/tools/google_drive/tool.py @@ -9,12 +9,12 @@ from schema import Literal, Or, Schema from griptape.artifacts import BlobArtifact, ErrorArtifact, InfoArtifact, ListArtifact, TextArtifact -from griptape.tools import BaseGoogleClient +from 
griptape.tools import BaseGoogleTool from griptape.utils.decorators import activity @define -class GoogleDriveClient(BaseGoogleClient): +class GoogleDriveTool(BaseGoogleTool): LIST_FILES_SCOPES = ["https://www.googleapis.com/auth/drive.readonly"] GOOGLE_EXPORT_MIME_MAPPING = { diff --git a/griptape/tools/google_gmail/manifest.yml b/griptape/tools/google_gmail/manifest.yml index 262e3a6f8..869575166 100644 --- a/griptape/tools/google_gmail/manifest.yml +++ b/griptape/tools/google_gmail/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Google Gmail Client +name: Google Gmail Tool description: Tool for working with Google Gmail. contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/google_gmail/tool.py b/griptape/tools/google_gmail/tool.py index 853b8850f..2cc959168 100644 --- a/griptape/tools/google_gmail/tool.py +++ b/griptape/tools/google_gmail/tool.py @@ -8,12 +8,12 @@ from schema import Literal, Schema from griptape.artifacts import ErrorArtifact, InfoArtifact -from griptape.tools import BaseGoogleClient +from griptape.tools import BaseGoogleTool from griptape.utils.decorators import activity @define -class GoogleGmailClient(BaseGoogleClient): +class GoogleGmailTool(BaseGoogleTool): CREATE_DRAFT_EMAIL_SCOPES = ["https://www.googleapis.com/auth/gmail.compose"] owner_email: str = field(kw_only=True) diff --git a/griptape/tools/rest_api_client/__init__.py b/griptape/tools/griptape_cloud_knowledge_base/__init__.py similarity index 100% rename from griptape/tools/rest_api_client/__init__.py rename to griptape/tools/griptape_cloud_knowledge_base/__init__.py diff --git a/griptape/tools/griptape_cloud_knowledge_base_client/manifest.yml b/griptape/tools/griptape_cloud_knowledge_base/manifest.yml similarity index 78% rename from griptape/tools/griptape_cloud_knowledge_base_client/manifest.yml rename to griptape/tools/griptape_cloud_knowledge_base/manifest.yml index 89b7d2fe3..7262964c3 100644 --- a/griptape/tools/griptape_cloud_knowledge_base_client/manifest.yml +++ b/griptape/tools/griptape_cloud_knowledge_base/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Griptape Cloud Knowledge Base Client +name: Griptape Cloud Knowledge Base Tool description: Tool for using the Griptape Cloud Knowledge Base API. contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/griptape_cloud_knowledge_base_client/tool.py b/griptape/tools/griptape_cloud_knowledge_base/tool.py similarity index 91% rename from griptape/tools/griptape_cloud_knowledge_base_client/tool.py rename to griptape/tools/griptape_cloud_knowledge_base/tool.py index 0c544524d..13ff76baa 100644 --- a/griptape/tools/griptape_cloud_knowledge_base_client/tool.py +++ b/griptape/tools/griptape_cloud_knowledge_base/tool.py @@ -7,12 +7,12 @@ from schema import Literal, Schema from griptape.artifacts import ErrorArtifact, TextArtifact -from griptape.tools.base_griptape_cloud_client import BaseGriptapeCloudClient +from griptape.tools.base_griptape_cloud_tool import BaseGriptapeCloudTool from griptape.utils.decorators import activity @define -class GriptapeCloudKnowledgeBaseClient(BaseGriptapeCloudClient): +class GriptapeCloudKnowledgeBaseTool(BaseGriptapeCloudTool): """Tool for querying a Griptape Cloud Knowledge Base. Attributes: @@ -64,7 +64,7 @@ def _get_knowledge_base_description(self) -> str: return response_body["description"] else: raise ValueError( - f"No description found for Knowledge Base {self.knowledge_base_id}. 
Please set a description, or manually set the `GriptapeCloudKnowledgeBaseClient.description` attribute.", + f"No description found for Knowledge Base {self.knowledge_base_id}. Please set a description, or manually set the `GriptapeCloudKnowledgeBaseTool.description` attribute.", ) else: raise ValueError(f"Error accessing Knowledge Base {self.knowledge_base_id}.") diff --git a/griptape/tools/sql_client/__init__.py b/griptape/tools/image_query/__init__.py similarity index 100% rename from griptape/tools/sql_client/__init__.py rename to griptape/tools/image_query/__init__.py diff --git a/griptape/tools/image_query_client/manifest.yml b/griptape/tools/image_query/manifest.yml similarity index 86% rename from griptape/tools/image_query_client/manifest.yml rename to griptape/tools/image_query/manifest.yml index b73027f6a..504543fca 100644 --- a/griptape/tools/image_query_client/manifest.yml +++ b/griptape/tools/image_query/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Image Query Client +name: Image Query Tool description: Tool for executing a natural language query on images. contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/image_query_client/tool.py b/griptape/tools/image_query/tool.py similarity index 97% rename from griptape/tools/image_query_client/tool.py rename to griptape/tools/image_query/tool.py index a10929b13..9d1dbb89b 100644 --- a/griptape/tools/image_query_client/tool.py +++ b/griptape/tools/image_query/tool.py @@ -17,7 +17,7 @@ @define -class ImageQueryClient(BaseTool): +class ImageQueryTool(BaseTool): image_query_engine: ImageQueryEngine = field(kw_only=True) image_loader: ImageLoader = field(default=Factory(lambda: ImageLoader()), kw_only=True) @@ -30,7 +30,7 @@ class ImageQueryClient(BaseTool): "query", description="A detailed question to be answered using the contents of the provided images.", ): str, - Literal("image_paths", description="The paths to an image files on disk."): list[str], + Literal("image_paths", description="The paths to an image files on disk."): Schema([str]), }, ), }, diff --git a/griptape/tools/structure_run_client/__init__.py b/griptape/tools/inpainting_image_generation/__init__.py similarity index 100% rename from griptape/tools/structure_run_client/__init__.py rename to griptape/tools/inpainting_image_generation/__init__.py diff --git a/griptape/tools/inpainting_image_generation_client/manifest.yml b/griptape/tools/inpainting_image_generation/manifest.yml similarity index 79% rename from griptape/tools/inpainting_image_generation_client/manifest.yml rename to griptape/tools/inpainting_image_generation/manifest.yml index 575c0630d..d6592b741 100644 --- a/griptape/tools/inpainting_image_generation_client/manifest.yml +++ b/griptape/tools/inpainting_image_generation/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Inpainting Image Generation Client +name: Inpainting Image Generation Tool description: Tool for generating images through image inpainting. 
contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/outpainting_image_generation_client/requirements.txt b/griptape/tools/inpainting_image_generation/requirements.txt similarity index 100% rename from griptape/tools/outpainting_image_generation_client/requirements.txt rename to griptape/tools/inpainting_image_generation/requirements.txt diff --git a/griptape/tools/inpainting_image_generation_client/tool.py b/griptape/tools/inpainting_image_generation/tool.py similarity index 93% rename from griptape/tools/inpainting_image_generation_client/tool.py rename to griptape/tools/inpainting_image_generation/tool.py index e8979efb0..d32f481d9 100644 --- a/griptape/tools/inpainting_image_generation_client/tool.py +++ b/griptape/tools/inpainting_image_generation/tool.py @@ -8,7 +8,7 @@ from griptape.artifacts import ErrorArtifact, ImageArtifact from griptape.loaders import ImageLoader -from griptape.tools.base_image_generation_client import BaseImageGenerationClient +from griptape.tools.base_image_generation_tool import BaseImageGenerationTool from griptape.utils.decorators import activity from griptape.utils.load_artifact_from_memory import load_artifact_from_memory @@ -17,7 +17,7 @@ @define -class InpaintingImageGenerationClient(BaseImageGenerationClient): +class InpaintingImageGenerationTool(BaseImageGenerationTool): """A tool that can be used to generate prompted inpaintings of an image. Attributes: @@ -34,8 +34,8 @@ class InpaintingImageGenerationClient(BaseImageGenerationClient): "description": "Modifies an image within a specified mask area using image and mask files.", "schema": Schema( { - Literal("prompt", description=BaseImageGenerationClient.PROMPT_DESCRIPTION): str, - Literal("negative_prompt", description=BaseImageGenerationClient.NEGATIVE_PROMPT_DESCRIPTION): str, + Literal("prompt", description=BaseImageGenerationTool.PROMPT_DESCRIPTION): str, + Literal("negative_prompt", description=BaseImageGenerationTool.NEGATIVE_PROMPT_DESCRIPTION): str, Literal( "image_file", description="The path to an image file to be used as a base to generate variations from.", @@ -63,8 +63,8 @@ def image_inpainting_from_file(self, params: dict[str, dict[str, str]]) -> Image "description": "Modifies an image within a specified mask area using image and mask artifacts in memory.", "schema": Schema( { - Literal("prompt", description=BaseImageGenerationClient.PROMPT_DESCRIPTION): str, - Literal("negative_prompt", description=BaseImageGenerationClient.NEGATIVE_PROMPT_DESCRIPTION): str, + Literal("prompt", description=BaseImageGenerationTool.PROMPT_DESCRIPTION): str, + Literal("negative_prompt", description=BaseImageGenerationTool.NEGATIVE_PROMPT_DESCRIPTION): str, "memory_name": str, "image_artifact_namespace": str, "image_artifact_name": str, diff --git a/griptape/tools/task_memory_client/__init__.py b/griptape/tools/openweather/__init__.py similarity index 100% rename from griptape/tools/task_memory_client/__init__.py rename to griptape/tools/openweather/__init__.py diff --git a/griptape/tools/openweather_client/manifest.yml b/griptape/tools/openweather/manifest.yml similarity index 86% rename from griptape/tools/openweather_client/manifest.yml rename to griptape/tools/openweather/manifest.yml index 66efae262..315143ea2 100644 --- a/griptape/tools/openweather_client/manifest.yml +++ b/griptape/tools/openweather/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: OpenWeather Client +name: OpenWeather Tool description: Tool for using OpenWeather to retrieve 
weather information contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/openweather_client/tool.py b/griptape/tools/openweather/tool.py similarity index 99% rename from griptape/tools/openweather_client/tool.py rename to griptape/tools/openweather/tool.py index 4a7edb0f6..311db733b 100644 --- a/griptape/tools/openweather_client/tool.py +++ b/griptape/tools/openweather/tool.py @@ -13,7 +13,7 @@ @define -class OpenWeatherClient(BaseTool): +class OpenWeatherTool(BaseTool): BASE_URL = "https://api.openweathermap.org/data/3.0/onecall" GEOCODING_URL = "https://api.openweathermap.org/geo/1.0/direct" US_STATE_CODES = [ diff --git a/griptape/tools/text_to_speech_client/__init__.py b/griptape/tools/outpainting_image_generation/__init__.py similarity index 100% rename from griptape/tools/text_to_speech_client/__init__.py rename to griptape/tools/outpainting_image_generation/__init__.py diff --git a/griptape/tools/outpainting_image_generation_client/manifest.yml b/griptape/tools/outpainting_image_generation/manifest.yml similarity index 79% rename from griptape/tools/outpainting_image_generation_client/manifest.yml rename to griptape/tools/outpainting_image_generation/manifest.yml index 54c84668e..8b7ca14a1 100644 --- a/griptape/tools/outpainting_image_generation_client/manifest.yml +++ b/griptape/tools/outpainting_image_generation/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Outpainting Image Generation Client +name: Outpainting Image Generation Tool description: Tool for generating images through image outpainting. contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/prompt_image_generation_client/requirements.txt b/griptape/tools/outpainting_image_generation/requirements.txt similarity index 100% rename from griptape/tools/prompt_image_generation_client/requirements.txt rename to griptape/tools/outpainting_image_generation/requirements.txt diff --git a/griptape/tools/outpainting_image_generation_client/tool.py b/griptape/tools/outpainting_image_generation/tool.py similarity index 93% rename from griptape/tools/outpainting_image_generation_client/tool.py rename to griptape/tools/outpainting_image_generation/tool.py index 800d88e70..afa39e178 100644 --- a/griptape/tools/outpainting_image_generation_client/tool.py +++ b/griptape/tools/outpainting_image_generation/tool.py @@ -8,7 +8,7 @@ from griptape.artifacts import ErrorArtifact, ImageArtifact from griptape.loaders import ImageLoader -from griptape.tools import BaseImageGenerationClient +from griptape.tools import BaseImageGenerationTool from griptape.utils.decorators import activity from griptape.utils.load_artifact_from_memory import load_artifact_from_memory @@ -17,7 +17,7 @@ @define -class OutpaintingImageGenerationClient(BaseImageGenerationClient): +class OutpaintingImageGenerationTool(BaseImageGenerationTool): """A tool that can be used to generate prompted outpaintings of an image. 
Attributes: @@ -34,8 +34,8 @@ class OutpaintingImageGenerationClient(BaseImageGenerationClient): "description": "Modifies an image outside a specified mask area using image and mask files.", "schema": Schema( { - Literal("prompt", description=BaseImageGenerationClient.PROMPT_DESCRIPTION): str, - Literal("negative_prompt", description=BaseImageGenerationClient.NEGATIVE_PROMPT_DESCRIPTION): str, + Literal("prompt", description=BaseImageGenerationTool.PROMPT_DESCRIPTION): str, + Literal("negative_prompt", description=BaseImageGenerationTool.NEGATIVE_PROMPT_DESCRIPTION): str, Literal( "image_file", description="The path to an image file to be used as a base to generate variations from.", @@ -61,8 +61,8 @@ def image_outpainting_from_file(self, params: dict[str, dict[str, str]]) -> Imag "description": "Modifies an image outside a specified mask area using image and mask artifacts in memory.", "schema": Schema( { - Literal("prompt", description=BaseImageGenerationClient.PROMPT_DESCRIPTION): str, - Literal("negative_prompt", description=BaseImageGenerationClient.NEGATIVE_PROMPT_DESCRIPTION): str, + Literal("prompt", description=BaseImageGenerationTool.PROMPT_DESCRIPTION): str, + Literal("negative_prompt", description=BaseImageGenerationTool.NEGATIVE_PROMPT_DESCRIPTION): str, "memory_name": str, "image_artifact_namespace": str, "mask_artifact_namespace": str, diff --git a/griptape/tools/variation_image_generation_client/__init__.py b/griptape/tools/prompt_image_generation/__init__.py similarity index 100% rename from griptape/tools/variation_image_generation_client/__init__.py rename to griptape/tools/prompt_image_generation/__init__.py diff --git a/griptape/tools/prompt_image_generation_client/manifest.yml b/griptape/tools/prompt_image_generation/manifest.yml similarity index 80% rename from griptape/tools/prompt_image_generation_client/manifest.yml rename to griptape/tools/prompt_image_generation/manifest.yml index 665a24444..091cc14d7 100644 --- a/griptape/tools/prompt_image_generation_client/manifest.yml +++ b/griptape/tools/prompt_image_generation/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Prompt Image Generation Client +name: Prompt Image Generation Tool description: Tool for generating images from text prompts. contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/rag_client/requirements.txt b/griptape/tools/prompt_image_generation/requirements.txt similarity index 100% rename from griptape/tools/rag_client/requirements.txt rename to griptape/tools/prompt_image_generation/requirements.txt diff --git a/griptape/tools/prompt_image_generation_client/tool.py b/griptape/tools/prompt_image_generation/tool.py similarity index 87% rename from griptape/tools/prompt_image_generation_client/tool.py rename to griptape/tools/prompt_image_generation/tool.py index 771b4e41d..6cd6ac560 100644 --- a/griptape/tools/prompt_image_generation_client/tool.py +++ b/griptape/tools/prompt_image_generation/tool.py @@ -5,7 +5,7 @@ from attrs import define, field from schema import Literal, Schema -from griptape.tools import BaseImageGenerationClient +from griptape.tools import BaseImageGenerationTool from griptape.utils.decorators import activity if TYPE_CHECKING: @@ -14,7 +14,7 @@ @define -class PromptImageGenerationClient(BaseImageGenerationClient): +class PromptImageGenerationTool(BaseImageGenerationTool): """A tool that can be used to generate an image from a text prompt. 
Attributes: @@ -30,8 +30,8 @@ class PromptImageGenerationClient(BaseImageGenerationClient): "description": "Generates an image from text prompts.", "schema": Schema( { - Literal("prompt", description=BaseImageGenerationClient.PROMPT_DESCRIPTION): str, - Literal("negative_prompt", description=BaseImageGenerationClient.NEGATIVE_PROMPT_DESCRIPTION): str, + Literal("prompt", description=BaseImageGenerationTool.PROMPT_DESCRIPTION): str, + Literal("negative_prompt", description=BaseImageGenerationTool.NEGATIVE_PROMPT_DESCRIPTION): str, } ), }, diff --git a/griptape/tools/vector_store_client/__init__.py b/griptape/tools/prompt_summary/__init__.py similarity index 100% rename from griptape/tools/vector_store_client/__init__.py rename to griptape/tools/prompt_summary/__init__.py diff --git a/griptape/tools/prompt_summary/manifest.yml b/griptape/tools/prompt_summary/manifest.yml new file mode 100644 index 000000000..a83ea4021 --- /dev/null +++ b/griptape/tools/prompt_summary/manifest.yml @@ -0,0 +1,5 @@ +version: "v1" +name: Prompt Summary Client +description: Tool for using a Prompt Summary Engine +contact_email: hello@griptape.ai +legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/variation_image_generation_client/requirements.txt b/griptape/tools/prompt_summary/requirements.txt similarity index 100% rename from griptape/tools/variation_image_generation_client/requirements.txt rename to griptape/tools/prompt_summary/requirements.txt diff --git a/griptape/tools/prompt_summary/tool.py b/griptape/tools/prompt_summary/tool.py new file mode 100644 index 000000000..517507380 --- /dev/null +++ b/griptape/tools/prompt_summary/tool.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +from attrs import Factory, define, field +from schema import Literal, Or, Schema + +from griptape.artifacts import BaseArtifact, ErrorArtifact, ListArtifact, TextArtifact +from griptape.engines import PromptSummaryEngine +from griptape.mixins.rule_mixin import RuleMixin +from griptape.tools import BaseTool +from griptape.utils.decorators import activity + + +@define(kw_only=True) +class PromptSummaryTool(BaseTool, RuleMixin): + """Tool for using a Prompt Summary Engine. + + Attributes: + prompt_summary_engine: `PromptSummaryEngine`. 
+ """ + + prompt_summary_engine: PromptSummaryEngine = field(kw_only=True, default=Factory(lambda: PromptSummaryEngine())) + + @activity( + config={ + "description": "Can be used to summarize text content.", + "schema": Schema( + { + Literal("summary"): Or( + str, + Schema( + { + "memory_name": str, + "artifact_namespace": str, + } + ), + ), + } + ), + }, + ) + def summarize(self, params: dict) -> BaseArtifact: + summary = params["values"]["summary"] + + if isinstance(summary, str): + artifacts = ListArtifact([TextArtifact(summary)]) + else: + memory = self.find_input_memory(summary["memory_name"]) + artifact_namespace = summary["artifact_namespace"] + + if memory is not None: + artifacts = memory.load_artifacts(artifact_namespace) + else: + return ErrorArtifact("memory not found") + + return self.prompt_summary_engine.summarize_artifacts(artifacts, rulesets=self.all_rulesets) diff --git a/tests/unit/config/__init__.py b/griptape/tools/query/__init__.py similarity index 100% rename from tests/unit/config/__init__.py rename to griptape/tools/query/__init__.py diff --git a/griptape/tools/query/manifest.yml b/griptape/tools/query/manifest.yml new file mode 100644 index 000000000..086a86d5a --- /dev/null +++ b/griptape/tools/query/manifest.yml @@ -0,0 +1,5 @@ +version: "v1" +name: Query Client +description: Tool for performing a query against data. +contact_email: hello@griptape.ai +legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/vector_store_client/requirements.txt b/griptape/tools/query/requirements.txt similarity index 100% rename from griptape/tools/vector_store_client/requirements.txt rename to griptape/tools/query/requirements.txt diff --git a/griptape/tools/query/tool.py b/griptape/tools/query/tool.py new file mode 100644 index 000000000..0089970e9 --- /dev/null +++ b/griptape/tools/query/tool.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from attrs import Factory, define, field +from schema import Literal, Or, Schema + +from griptape.artifacts import BaseArtifact, ErrorArtifact, ListArtifact, TextArtifact +from griptape.configs import Defaults +from griptape.engines.rag import RagEngine +from griptape.engines.rag.modules import ( + PromptResponseRagModule, +) +from griptape.engines.rag.rag_context import RagContext +from griptape.engines.rag.stages import ResponseRagStage +from griptape.mixins.rule_mixin import RuleMixin +from griptape.tools.base_tool import BaseTool +from griptape.utils.decorators import activity + +if TYPE_CHECKING: + from griptape.drivers.prompt.base_prompt_driver import BasePromptDriver + + +@define(kw_only=True) +class QueryTool(BaseTool, RuleMixin): + """Tool for performing a query against data.""" + + prompt_driver: BasePromptDriver = field(default=Factory(lambda: Defaults.drivers_config.prompt_driver)) + + _rag_engine: RagEngine = field( + default=Factory( + lambda self: RagEngine( + response_stage=ResponseRagStage( + response_modules=[ + PromptResponseRagModule(prompt_driver=self.prompt_driver, rulesets=self.rulesets) + ], + ), + ), + takes_self=True, + ), + alias="_rag_engine", + ) + + @activity( + config={ + "description": "Can be used to search through textual content.", + "schema": Schema( + { + Literal("query", description="A natural language search query"): str, + Literal("content"): Or( + str, + Schema( + { + "memory_name": str, + "artifact_namespace": str, + } + ), + ), + } + ), + }, + ) + def query(self, params: dict) -> BaseArtifact: + query = params["values"]["query"] + 
content = params["values"]["content"] + + if isinstance(content, str): + text_artifacts = [TextArtifact(content)] + else: + memory = self.find_input_memory(content["memory_name"]) + artifact_namespace = content["artifact_namespace"] + + if memory is not None: + artifacts = memory.load_artifacts(artifact_namespace) + else: + return ErrorArtifact("memory not found") + + text_artifacts = [artifact for artifact in artifacts if isinstance(artifact, TextArtifact)] + + outputs = self._rag_engine.process(RagContext(query=query, text_chunks=text_artifacts)).outputs + + if len(outputs) > 0: + return ListArtifact(outputs) + else: + return ErrorArtifact("query output is empty") diff --git a/griptape/tools/rag/__init__.py b/griptape/tools/rag/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/griptape/tools/rag_client/manifest.yml b/griptape/tools/rag/manifest.yml similarity index 88% rename from griptape/tools/rag_client/manifest.yml rename to griptape/tools/rag/manifest.yml index 86998feb4..7a3d49c65 100644 --- a/griptape/tools/rag_client/manifest.yml +++ b/griptape/tools/rag/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: RAG Client +name: RAG Tool description: Tool for querying RAG engines contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/rag/requirements.txt b/griptape/tools/rag/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/griptape/tools/rag_client/tool.py b/griptape/tools/rag/tool.py similarity index 64% rename from griptape/tools/rag_client/tool.py rename to griptape/tools/rag/tool.py index bbdef8159..aab52e6c0 100644 --- a/griptape/tools/rag_client/tool.py +++ b/griptape/tools/rag/tool.py @@ -5,7 +5,7 @@ from attrs import define, field from schema import Literal, Schema -from griptape.artifacts import BaseArtifact, ErrorArtifact +from griptape.artifacts import ErrorArtifact, ListArtifact from griptape.tools import BaseTool from griptape.utils.decorators import activity @@ -14,7 +14,7 @@ @define(kw_only=True) -class RagClient(BaseTool): +class RagTool(BaseTool): """Tool for querying a RAG engine. 
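PromptSummaryTool and QueryTool above are new tools; together with the reworked RagTool they take over the summarize-and-query activities of the TaskMemoryClient deleted later in this patch. A minimal sketch of how they might be wired into an agent, assuming the package-level `griptape.tools` exports follow these renames and that a default prompt driver and API key are configured:

from griptape.structures import Agent
from griptape.tools import PromptSummaryTool, QueryTool, WebScraperTool

agent = Agent(
    tools=[
        WebScraperTool(off_prompt=True),      # scraped page content goes to task memory
        PromptSummaryTool(off_prompt=False),  # summarizes content pulled back out of task memory
        QueryTool(off_prompt=False),          # or answers a targeted query against it
    ]
)
agent.run("Summarize https://www.griptape.ai and list the tools it mentions.")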
Attributes: @@ -31,15 +31,23 @@ class RagClient(BaseTool): "schema": Schema({Literal("query", description="A natural language search query"): str}), }, ) - def search(self, params: dict) -> BaseArtifact: + def search(self, params: dict) -> ListArtifact | ErrorArtifact: query = params["values"]["query"] try: - result = self.rag_engine.process_query(query) + artifacts = self.rag_engine.process_query(query).outputs - if result.output is None: - return ErrorArtifact("query output is empty") + outputs = [] + for artifact in artifacts: + if isinstance(artifact, ListArtifact): + outputs.extend(artifact.value) + else: + outputs.append(artifact) + + if len(outputs) > 0: + return ListArtifact(outputs) else: - return result.output + return ErrorArtifact("query output is empty") + except Exception as e: return ErrorArtifact(f"error querying: {e}") diff --git a/griptape/tools/rest_api/__init__.py b/griptape/tools/rest_api/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/griptape/tools/rest_api_client/manifest.yml b/griptape/tools/rest_api/manifest.yml similarity index 87% rename from griptape/tools/rest_api_client/manifest.yml rename to griptape/tools/rest_api/manifest.yml index 7a881d037..01816e483 100644 --- a/griptape/tools/rest_api_client/manifest.yml +++ b/griptape/tools/rest_api/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Rest Api +name: Rest Api Tool description: Tool for calling rest apis. contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/rest_api_client/tool.py b/griptape/tools/rest_api/tool.py similarity index 97% rename from griptape/tools/rest_api_client/tool.py rename to griptape/tools/rest_api/tool.py index b27beda0e..f5e233e57 100644 --- a/griptape/tools/rest_api_client/tool.py +++ b/griptape/tools/rest_api/tool.py @@ -14,7 +14,7 @@ @define -class RestApiClient(BaseTool): +class RestApiTool(BaseTool): """A tool for making REST API requests. 
Attributes: @@ -83,7 +83,7 @@ def put(self, params: dict) -> BaseArtifact: ), "schema": Schema( { - Literal("path_params", description="The request path parameters."): list[str], + Literal("path_params", description="The request path parameters."): Schema([str]), Literal("body", description="The request body."): dict, }, ), @@ -148,7 +148,9 @@ def post(self, params: dict) -> BaseArtifact: Schema( { schema.Optional(Literal("query_params", description="The request query parameters.")): dict, - schema.Optional(Literal("path_params", description="The request path parameters.")): list[str], + schema.Optional(Literal("path_params", description="The request path parameters.")): Schema( + [str] + ), }, ), ), @@ -187,7 +189,7 @@ def get(self, params: dict) -> BaseArtifact: "schema": Schema( { schema.Optional(Literal("query_params", description="The request query parameters.")): dict, - schema.Optional(Literal("path_params", description="The request path parameters.")): list[str], + schema.Optional(Literal("path_params", description="The request path parameters.")): Schema([str]), }, ), }, diff --git a/griptape/tools/sql/__init__.py b/griptape/tools/sql/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/griptape/tools/sql_client/manifest.yml b/griptape/tools/sql/manifest.yml similarity index 88% rename from griptape/tools/sql_client/manifest.yml rename to griptape/tools/sql/manifest.yml index 22d0f4be2..2e1459a0d 100644 --- a/griptape/tools/sql_client/manifest.yml +++ b/griptape/tools/sql/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: SQL Client +name: SQL Tool description: Tool for executing SQL queries. contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/sql_client/tool.py b/griptape/tools/sql/tool.py similarity index 98% rename from griptape/tools/sql_client/tool.py rename to griptape/tools/sql/tool.py index 2de598c6b..a84bb87be 100644 --- a/griptape/tools/sql_client/tool.py +++ b/griptape/tools/sql/tool.py @@ -14,7 +14,7 @@ @define -class SqlClient(BaseTool): +class SqlTool(BaseTool): sql_loader: SqlLoader = field(kw_only=True) schema_name: Optional[str] = field(default=None, kw_only=True) table_name: str = field(kw_only=True) diff --git a/griptape/tools/structure_run/__init__.py b/griptape/tools/structure_run/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/griptape/tools/structure_run_client/manifest.yml b/griptape/tools/structure_run/manifest.yml similarity index 83% rename from griptape/tools/structure_run_client/manifest.yml rename to griptape/tools/structure_run/manifest.yml index 5f53158d8..b5feb835a 100644 --- a/griptape/tools/structure_run_client/manifest.yml +++ b/griptape/tools/structure_run/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Structure Run Client +name: Structure Run Tool description: Tool for running a Structure. contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/structure_run_client/tool.py b/griptape/tools/structure_run/tool.py similarity index 76% rename from griptape/tools/structure_run_client/tool.py rename to griptape/tools/structure_run/tool.py index f4f6c3786..317d5482e 100644 --- a/griptape/tools/structure_run_client/tool.py +++ b/griptape/tools/structure_run/tool.py @@ -14,7 +14,7 @@ @define -class StructureRunClient(BaseTool): +class StructureRunTool(BaseTool): """Tool for running a Structure. 
Attributes: @@ -27,9 +27,13 @@ class StructureRunClient(BaseTool): @activity( config={ - "description": "Can be used to run a Griptape Structure with the following description: {{ self.description }}", + "description": "Can be used to run a Griptape Structure with the following description: {{ _self.description }}", "schema": Schema( - {Literal("args", description="A list of string arguments to submit to the Structure Run"): list[str]}, + { + Literal("args", description="A list of string arguments to submit to the Structure Run"): Schema( + [str] + ) + }, ), }, ) diff --git a/griptape/tools/task_memory_client/manifest.yml b/griptape/tools/task_memory_client/manifest.yml deleted file mode 100644 index 0bff1af3d..000000000 --- a/griptape/tools/task_memory_client/manifest.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "v1" -name: Task Memory Client -description: Tool for summarizing and querying TaskMemory. -contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/task_memory_client/tool.py b/griptape/tools/task_memory_client/tool.py deleted file mode 100644 index 160a54d85..000000000 --- a/griptape/tools/task_memory_client/tool.py +++ /dev/null @@ -1,52 +0,0 @@ -from __future__ import annotations - -from attrs import define -from schema import Literal, Schema - -from griptape.artifacts import BaseArtifact, ErrorArtifact, InfoArtifact, TextArtifact -from griptape.tools import BaseTool -from griptape.utils.decorators import activity - - -@define -class TaskMemoryClient(BaseTool): - @activity( - config={ - "description": "Can be used to summarize memory content", - "schema": Schema({"memory_name": str, "artifact_namespace": str}), - }, - ) - def summarize(self, params: dict) -> TextArtifact | InfoArtifact | ErrorArtifact: - memory = self.find_input_memory(params["values"]["memory_name"]) - artifact_namespace = params["values"]["artifact_namespace"] - - if memory: - return memory.summarize_namespace(artifact_namespace) - else: - return ErrorArtifact("memory not found") - - @activity( - config={ - "description": "Can be used to search and query memory content", - "schema": Schema( - { - "memory_name": str, - "artifact_namespace": str, - Literal( - "query", - description="A natural language search query in the form of a question with enough " - "contextual information for another person to understand what the query is about", - ): str, - }, - ), - }, - ) - def query(self, params: dict) -> BaseArtifact: - memory = self.find_input_memory(params["values"]["memory_name"]) - artifact_namespace = params["values"]["artifact_namespace"] - query = params["values"]["query"] - - if memory: - return memory.query_namespace(namespace=artifact_namespace, query=query) - else: - return ErrorArtifact("memory not found") diff --git a/griptape/tools/text_to_speech/__init__.py b/griptape/tools/text_to_speech/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/griptape/tools/text_to_speech_client/manifest.yml b/griptape/tools/text_to_speech/manifest.yml similarity index 83% rename from griptape/tools/text_to_speech_client/manifest.yml rename to griptape/tools/text_to_speech/manifest.yml index 73062bb13..875e04576 100644 --- a/griptape/tools/text_to_speech_client/manifest.yml +++ b/griptape/tools/text_to_speech/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Text to Speech Client +name: Text to Speech Tool description: A tool for generating speech from text. 
contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/text_to_speech_client/tool.py b/griptape/tools/text_to_speech/tool.py similarity index 95% rename from griptape/tools/text_to_speech_client/tool.py rename to griptape/tools/text_to_speech/tool.py index 295641fd3..95a42d0ae 100644 --- a/griptape/tools/text_to_speech_client/tool.py +++ b/griptape/tools/text_to_speech/tool.py @@ -15,7 +15,7 @@ @define -class TextToSpeechClient(BlobArtifactFileOutputMixin, BaseTool): +class TextToSpeechTool(BlobArtifactFileOutputMixin, BaseTool): """A tool that can be used to generate speech from input text. Attributes: diff --git a/griptape/tools/variation_image_generation/__init__.py b/griptape/tools/variation_image_generation/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/griptape/tools/variation_image_generation_client/manifest.yml b/griptape/tools/variation_image_generation/manifest.yml similarity index 79% rename from griptape/tools/variation_image_generation_client/manifest.yml rename to griptape/tools/variation_image_generation/manifest.yml index eb9371016..1f3eb28e8 100644 --- a/griptape/tools/variation_image_generation_client/manifest.yml +++ b/griptape/tools/variation_image_generation/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Variation Image Generation Client +name: Variation Image Generation Tool description: Tool for generating variations of existing images. contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/variation_image_generation/requirements.txt b/griptape/tools/variation_image_generation/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/griptape/tools/variation_image_generation_client/tool.py b/griptape/tools/variation_image_generation/tool.py similarity index 91% rename from griptape/tools/variation_image_generation_client/tool.py rename to griptape/tools/variation_image_generation/tool.py index 5f836c5b1..9691f6206 100644 --- a/griptape/tools/variation_image_generation_client/tool.py +++ b/griptape/tools/variation_image_generation/tool.py @@ -8,7 +8,7 @@ from griptape.artifacts import ErrorArtifact, ImageArtifact from griptape.loaders import ImageLoader -from griptape.tools.base_image_generation_client import BaseImageGenerationClient +from griptape.tools.base_image_generation_tool import BaseImageGenerationTool from griptape.utils.decorators import activity from griptape.utils.load_artifact_from_memory import load_artifact_from_memory @@ -17,7 +17,7 @@ @define -class VariationImageGenerationClient(BaseImageGenerationClient): +class VariationImageGenerationTool(BaseImageGenerationTool): """A tool that can be used to generate prompted variations of an image. 
Attributes: @@ -34,8 +34,8 @@ class VariationImageGenerationClient(BaseImageGenerationClient): "description": "Generates a variation of a given input image file.", "schema": Schema( { - Literal("prompt", description=BaseImageGenerationClient.PROMPT_DESCRIPTION): str, - Literal("negative_prompt", description=BaseImageGenerationClient.NEGATIVE_PROMPT_DESCRIPTION): str, + Literal("prompt", description=BaseImageGenerationTool.PROMPT_DESCRIPTION): str, + Literal("negative_prompt", description=BaseImageGenerationTool.NEGATIVE_PROMPT_DESCRIPTION): str, Literal( "image_file", description="The path to an image file to be used as a base to generate variations from.", @@ -61,8 +61,8 @@ def image_variation_from_file(self, params: dict[str, dict[str, str]]) -> ImageA "description": "Generates a variation of a given input image artifact in memory.", "schema": Schema( { - Literal("prompt", description=BaseImageGenerationClient.PROMPT_DESCRIPTION): str, - Literal("negative_prompt", description=BaseImageGenerationClient.NEGATIVE_PROMPT_DESCRIPTION): str, + Literal("prompt", description=BaseImageGenerationTool.PROMPT_DESCRIPTION): str, + Literal("negative_prompt", description=BaseImageGenerationTool.NEGATIVE_PROMPT_DESCRIPTION): str, "memory_name": str, "artifact_namespace": str, "artifact_name": str, diff --git a/griptape/tools/vector_store/__init__.py b/griptape/tools/vector_store/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/griptape/tools/vector_store_client/manifest.yml b/griptape/tools/vector_store/manifest.yml similarity index 85% rename from griptape/tools/vector_store_client/manifest.yml rename to griptape/tools/vector_store/manifest.yml index a1a1d1d0c..d1fab7ce5 100644 --- a/griptape/tools/vector_store_client/manifest.yml +++ b/griptape/tools/vector_store/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Vector Store Client +name: Vector Store Tool description: Tool for storing and accessing data in vector stores contact_email: hello@griptape.ai legal_info_url: https://www.griptape.ai/legal \ No newline at end of file diff --git a/griptape/tools/vector_store/requirements.txt b/griptape/tools/vector_store/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/griptape/tools/vector_store_client/tool.py b/griptape/tools/vector_store/tool.py similarity index 98% rename from griptape/tools/vector_store_client/tool.py rename to griptape/tools/vector_store/tool.py index a0c638eef..71902b1c7 100644 --- a/griptape/tools/vector_store_client/tool.py +++ b/griptape/tools/vector_store/tool.py @@ -14,7 +14,7 @@ @define(kw_only=True) -class VectorStoreClient(BaseTool): +class VectorStoreTool(BaseTool): """A tool for querying a vector database. Attributes: diff --git a/griptape/tools/web_scraper/manifest.yml b/griptape/tools/web_scraper/manifest.yml index e2d0597ec..ec9d3db25 100644 --- a/griptape/tools/web_scraper/manifest.yml +++ b/griptape/tools/web_scraper/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Web Scraper +name: Web Scraper Tool description: Tool for scraping web pages for content, titles, authors, and keywords. 
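VectorStoreClient becomes VectorStoreTool above. A minimal sketch of the renamed tool in use, assuming it keeps its `description` and `vector_store_driver` attributes and that an OpenAI key is available for the embedding driver:

from griptape.drivers import LocalVectorStoreDriver, OpenAiEmbeddingDriver
from griptape.structures import Agent
from griptape.tools import VectorStoreTool

vector_store_driver = LocalVectorStoreDriver(embedding_driver=OpenAiEmbeddingDriver())

agent = Agent(
    tools=[
        VectorStoreTool(
            description="Contains notes about the Griptape framework",  # tells the LLM what the store holds
            vector_store_driver=vector_store_driver,
        )
    ]
)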
contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file +legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/web_scraper/tool.py b/griptape/tools/web_scraper/tool.py index 782e85d37..c27aaa066 100644 --- a/griptape/tools/web_scraper/tool.py +++ b/griptape/tools/web_scraper/tool.py @@ -10,7 +10,7 @@ @define -class WebScraper(BaseTool): +class WebScraperTool(BaseTool): web_loader: WebLoader = field(default=Factory(lambda: WebLoader()), kw_only=True) @activity( diff --git a/griptape/tools/web_search/manifest.yml b/griptape/tools/web_search/manifest.yml index 4bb2a82c8..c06db4f20 100644 --- a/griptape/tools/web_search/manifest.yml +++ b/griptape/tools/web_search/manifest.yml @@ -1,5 +1,5 @@ version: "v1" -name: Google Search -description: Tool for making making web searches on Google. +name: Web Search Tool +description: Tool for making making web searches. contact_email: hello@griptape.ai -legal_info_url: https://www.griptape.ai/legal \ No newline at end of file +legal_info_url: https://www.griptape.ai/legal diff --git a/griptape/tools/web_search/tool.py b/griptape/tools/web_search/tool.py index 8a1821a13..557c26a52 100644 --- a/griptape/tools/web_search/tool.py +++ b/griptape/tools/web_search/tool.py @@ -14,12 +14,12 @@ @define -class WebSearch(BaseTool): - web_search_driver: BaseWebSearchDriver = field(default=None, kw_only=True) +class WebSearchTool(BaseTool): + web_search_driver: BaseWebSearchDriver = field(kw_only=True) @activity( config={ - "description": "Can be used for searching the web", + "description": "Can be used for searching the web via the {{ _self.web_search_driver.__class__.__name__}}.", "schema": Schema( { Literal( @@ -31,9 +31,11 @@ class WebSearch(BaseTool): }, ) def search(self, props: dict) -> ListArtifact | ErrorArtifact: - query = props["values"]["query"] + values = props["values"] + query = values["query"] + extra_keys = {k: values[k] for k in values.keys() - {"query"}} try: - return self.web_search_driver.search(query) + return self.web_search_driver.search(query, **extra_keys) except Exception as e: return ErrorArtifact(f"Error searching '{query}' with {self.web_search_driver.__class__.__name__}: {e}") diff --git a/griptape/utils/__init__.py b/griptape/utils/__init__.py index 19730ac3b..03725f59d 100644 --- a/griptape/utils/__init__.py +++ b/griptape/utils/__init__.py @@ -5,8 +5,7 @@ from .python_runner import PythonRunner from .command_runner import CommandRunner from .chat import Chat -from .futures import execute_futures_dict -from .futures import execute_futures_list +from .futures import execute_futures_dict, execute_futures_list, execute_futures_list_dict from .token_counter import TokenCounter from .dict_utils import remove_null_values_in_dict_recursively, dict_merge, remove_key_in_dict_recursively from .file_utils import load_file, load_files @@ -37,6 +36,7 @@ def minify_json(value: str) -> str: "is_dependency_installed", "execute_futures_dict", "execute_futures_list", + "execute_futures_list_dict", "TokenCounter", "remove_null_values_in_dict_recursively", "dict_merge", diff --git a/griptape/utils/chat.py b/griptape/utils/chat.py index e98eeaa4d..21e045db7 100644 --- a/griptape/utils/chat.py +++ b/griptape/utils/chat.py @@ -1,5 +1,6 @@ from __future__ import annotations +import logging from typing import TYPE_CHECKING, Callable, Optional from attrs import Factory, define, field @@ -23,14 +24,26 @@ class Chat: default=Factory(lambda self: self.default_output_fn, takes_self=True), 
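WebSearch becomes WebSearchTool above, `web_search_driver` loses its `None` default, and any extra keys the LLM supplies beyond `query` are now forwarded to the driver's `search()` call. A minimal sketch with an explicit driver, assuming `GoogleWebSearchDriver` keeps its `api_key`/`search_id` fields; the environment variable names are illustrative:

import os

from griptape.drivers import GoogleWebSearchDriver
from griptape.structures import Agent
from griptape.tools import WebSearchTool

agent = Agent(
    tools=[
        WebSearchTool(
            web_search_driver=GoogleWebSearchDriver(
                api_key=os.environ["GOOGLE_API_KEY"],
                search_id=os.environ["GOOGLE_API_SEARCH_ID"],
            )
        )
    ]
)
agent.run("What is the latest griptape release?")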
kw_only=True, ) + logger_level: int = field(default=logging.ERROR, kw_only=True) def default_output_fn(self, text: str) -> None: - if self.structure.config.prompt_driver.stream: + from griptape.tasks.prompt_task import PromptTask + + streaming_tasks = [ + task for task in self.structure.tasks if isinstance(task, PromptTask) and task.prompt_driver.stream + ] + if streaming_tasks: print(text, end="", flush=True) # noqa: T201 else: print(text) # noqa: T201 def start(self) -> None: + from griptape.configs import Defaults + + # Hide Griptape's logging output except for errors + old_logger_level = logging.getLogger(Defaults.logging_config.logger_name).getEffectiveLevel() + logging.getLogger(Defaults.logging_config.logger_name).setLevel(self.logger_level) + if self.intro_text: self.output_fn(self.intro_text) while True: @@ -40,7 +53,7 @@ def start(self) -> None: self.output_fn(self.exiting_text) break - if self.structure.config.prompt_driver.stream: + if Defaults.drivers_config.prompt_driver.stream: self.output_fn(self.processing_text + "\n") stream = Stream(self.structure).run(question) first_chunk = next(stream) @@ -50,3 +63,6 @@ def start(self) -> None: else: self.output_fn(self.processing_text) self.output_fn(f"{self.response_prefix}{self.structure.run(question).output_task.output.to_text()}") + + # Restore the original logger level + logging.getLogger(Defaults.logging_config.logger_name).setLevel(old_logger_level) diff --git a/griptape/utils/conversation.py b/griptape/utils/conversation.py index 97318c426..dcc7ae717 100644 --- a/griptape/utils/conversation.py +++ b/griptape/utils/conversation.py @@ -1,31 +1,46 @@ from __future__ import annotations -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional -from attrs import define, field +from attrs import Attribute, define, field if TYPE_CHECKING: - from griptape.memory.structure import ConversationMemory + from griptape.memory.structure import BaseConversationMemory @define(frozen=True) class Conversation: - memory: ConversationMemory = field() + memory: Optional[BaseConversationMemory] = field() + + @memory.validator # pyright: ignore[reportAttributeAccessIssue] + def validate_memory(self, attribute: Attribute, value: Optional[BaseConversationMemory]) -> None: + if value is None: + raise ValueError("Conversation memory must not be None.") def lines(self) -> list[str]: + from griptape.memory.structure import SummaryConversationMemory + lines = [] for run in self.memory.runs: lines.extend((f"Q: {run.input}", f"A: {run.output}")) + if isinstance(self.memory, SummaryConversationMemory): + lines.append(f"Summary: {self.memory.summary}") + return lines def prompt_stack(self) -> list[str]: + from griptape.memory.structure import SummaryConversationMemory + lines = [] for stack in self.memory.to_prompt_stack().messages: lines.append(f"{stack.role}: {stack.to_text()}") + if isinstance(self.memory, SummaryConversationMemory): + lines.append(f"Summary: {self.memory.summary}") + return lines def __str__(self) -> str: diff --git a/griptape/utils/decorators.py b/griptape/utils/decorators.py index 10bf6c9a4..2ea296693 100644 --- a/griptape/utils/decorators.py +++ b/griptape/utils/decorators.py @@ -1,5 +1,7 @@ +from __future__ import annotations + import functools -from typing import Any, Callable +from typing import Any, Callable, Optional import schema from schema import Schema @@ -27,3 +29,23 @@ def wrapper(self: Any, *args, **kwargs) -> Any: return wrapper return decorator + + +def lazy_property(attr_name: Optional[str] = None) -> 
Callable[[Callable[[Any], Any]], property]: + def decorator(func: Callable[[Any], Any]) -> property: + actual_attr_name = f"_{func.__name__}" if attr_name is None else attr_name + + @property + @functools.wraps(func) + def lazy_attr(self: Any) -> Any: + if getattr(self, actual_attr_name) is None: + setattr(self, actual_attr_name, func(self)) + return getattr(self, actual_attr_name) + + @lazy_attr.setter + def lazy_attr(self: Any, value: Any) -> None: + setattr(self, actual_attr_name, value) + + return lazy_attr + + return decorator diff --git a/griptape/utils/futures.py b/griptape/utils/futures.py index ea22e4c56..b91bb3918 100644 --- a/griptape/utils/futures.py +++ b/griptape/utils/futures.py @@ -16,3 +16,9 @@ def execute_futures_list(fs_list: list[futures.Future[T]]) -> list[T]: futures.wait(fs_list, timeout=None, return_when=futures.ALL_COMPLETED) return [future.result() for future in fs_list] + + +def execute_futures_list_dict(fs_dict: dict[str, list[futures.Future[T]]]) -> dict[str, list[T]]: + execute_futures_list([item for sublist in fs_dict.values() for item in sublist]) + + return {key: [f.result() for f in fs] for key, fs in fs_dict.items()} diff --git a/griptape/utils/j2.py b/griptape/utils/j2.py index 70cf936db..3aecd8e3c 100644 --- a/griptape/utils/j2.py +++ b/griptape/utils/j2.py @@ -23,8 +23,7 @@ class J2: def render(self, **kwargs) -> str: if self.template_name is None: raise ValueError("template_name is required.") - else: - return self.environment.get_template(self.template_name).render(kwargs).rstrip() + return self.environment.get_template(self.template_name).render(kwargs).rstrip() def render_from_string(self, value: str, **kwargs) -> str: return self.environment.from_string(value).render(kwargs) diff --git a/griptape/utils/load_artifact_from_memory.py b/griptape/utils/load_artifact_from_memory.py index a45a41dbd..2d3f8bc86 100644 --- a/griptape/utils/load_artifact_from_memory.py +++ b/griptape/utils/load_artifact_from_memory.py @@ -1,9 +1,14 @@ -from griptape.artifacts import BaseArtifact -from griptape.memory import TaskMemory +from __future__ import annotations + +from typing import TYPE_CHECKING, Optional + +if TYPE_CHECKING: + from griptape.artifacts import BaseArtifact + from griptape.memory import TaskMemory def load_artifact_from_memory( - memory: TaskMemory, + memory: Optional[TaskMemory], artifact_namespace: str, artifact_name: str, artifact_type: type, diff --git a/griptape/utils/stream.py b/griptape/utils/stream.py index bf33e5df8..8a764e85a 100644 --- a/griptape/utils/stream.py +++ b/griptape/utils/stream.py @@ -7,10 +7,7 @@ from attrs import Attribute, Factory, define, field from griptape.artifacts.text_artifact import TextArtifact -from griptape.events.completion_chunk_event import CompletionChunkEvent -from griptape.events.event_listener import EventListener -from griptape.events.finish_prompt_event import FinishPromptEvent -from griptape.events.finish_structure_run_event import FinishStructureRunEvent +from griptape.events import CompletionChunkEvent, EventBus, EventListener, FinishPromptEvent, FinishStructureRunEvent if TYPE_CHECKING: from collections.abc import Iterator @@ -37,8 +34,13 @@ class Stream: @structure.validator # pyright: ignore[reportAttributeAccessIssue] def validate_structure(self, _: Attribute, structure: Structure) -> None: - if not structure.config.prompt_driver.stream: - raise ValueError("prompt driver does not have streaming enabled, enable with stream=True") + from griptape.tasks import PromptTask + + streaming_tasks = [ + task 
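The new `lazy_property` decorator above builds a property that computes its value on first access and caches it on a backing attribute (`_<name>` unless `attr_name` is given), plus a setter for overriding the cached value. A minimal sketch against the code as added here; the class and attribute names are made up for illustration:

from griptape.utils.decorators import lazy_property

class ApiClient:
    def __init__(self) -> None:
        self._session = None  # backing attribute that lazy_property reads and writes

    @lazy_property()
    def session(self) -> dict:
        # Runs only once; the result is cached on self._session afterwards.
        return {"connected": True}

client = ApiClient()
assert client.session is client.session  # second access returns the cached object
client.session = {"connected": False}    # the generated setter overrides the cache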
for task in structure.tasks if isinstance(task, PromptTask) and task.prompt_driver.stream + ] + if not streaming_tasks: + raise ValueError("Structure does not have any streaming tasks, enable with stream=True") _event_queue: Queue[BaseEvent] = field(default=Factory(lambda: Queue())) @@ -64,8 +66,8 @@ def event_handler(event: BaseEvent) -> None: handler=event_handler, event_types=[CompletionChunkEvent, FinishPromptEvent, FinishStructureRunEvent], ) - self.structure.add_event_listener(stream_event_listener) + EventBus.add_event_listener(stream_event_listener) self.structure.run(*args) - self.structure.remove_event_listener(stream_event_listener) + EventBus.remove_event_listener(stream_event_listener) diff --git a/griptape/utils/structure_visualizer.py b/griptape/utils/structure_visualizer.py index f24443cd6..260f6efb8 100644 --- a/griptape/utils/structure_visualizer.py +++ b/griptape/utils/structure_visualizer.py @@ -34,8 +34,7 @@ def to_url(self) -> str: graph_bytes = graph.encode("utf-8") base64_string = base64.b64encode(graph_bytes).decode("utf-8") - url = f"https://mermaid.ink/svg/{base64_string}" - return url + return f"https://mermaid.ink/svg/{base64_string}" def __render_task(self, task: BaseTask) -> str: if task.children: diff --git a/griptape/utils/token_counter.py b/griptape/utils/token_counter.py index 2732d95f1..64c1be492 100644 --- a/griptape/utils/token_counter.py +++ b/griptape/utils/token_counter.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from attrs import define, field @@ -5,7 +7,7 @@ class TokenCounter: tokens: int = field(default=0, kw_only=True) - def add_tokens(self, new_tokens: int) -> int: - self.tokens += new_tokens + def add_tokens(self, new_tokens: int | float) -> int: + self.tokens += int(new_tokens) return self.tokens diff --git a/mkdocs.yml b/mkdocs.yml index 0f249da85..4207d2171 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -91,7 +91,7 @@ nav: - Task Memory and Off Prompt: "griptape-framework/structures/task-memory.md" - Conversation Memory: "griptape-framework/structures/conversation-memory.md" - Rulesets: "griptape-framework/structures/rulesets.md" - - Config: "griptape-framework/structures/config.md" + - Configs: "griptape-framework/structures/configs.md" - Observability: "griptape-framework/structures/observability.md" - Tools: - Overview: "griptape-framework/tools/index.md" @@ -129,34 +129,36 @@ nav: - Tools: - Overview: "griptape-tools/index.md" - Official Tools: - - AwsIamClient: "griptape-tools/official-tools/aws-iam-client.md" - - AwsS3Client: "griptape-tools/official-tools/aws-s3-client.md" - - Calculator: "griptape-tools/official-tools/calculator.md" - - Computer: "griptape-tools/official-tools/computer.md" - - DateTime: "griptape-tools/official-tools/date-time.md" - - EmailClient: "griptape-tools/official-tools/email-client.md" - - FileManager: "griptape-tools/official-tools/file-manager.md" - - GoogleCalendarClient: "griptape-tools/official-tools/google-cal-client.md" - - GoogleGmailClient: "griptape-tools/official-tools/google-gmail-client.md" - - GoogleDriveClient: "griptape-tools/official-tools/google-drive-client.md" - - GoogleDocsClient: "griptape-tools/official-tools/google-docs-client.md" - - StructureRunClient: "griptape-tools/official-tools/structure-run-client.md" - - OpenWeatherClient: "griptape-tools/official-tools/openweather-client.md" - - RestApiClient: "griptape-tools/official-tools/rest-api-client.md" - - SqlClient: "griptape-tools/official-tools/sql-client.md" - - TaskMemoryClient: 
"griptape-tools/official-tools/task-memory-client.md" - - VectorStoreClient: "griptape-tools/official-tools/vector-store-client.md" - - WebScraper: "griptape-tools/official-tools/web-scraper.md" - - WebSearch: "griptape-tools/official-tools/web-search.md" - - PromptImageGenerationClient: "griptape-tools/official-tools/prompt-image-generation-client.md" - - VariationImageGenerationClient: "griptape-tools/official-tools/variation-image-generation-client.md" - - InpaintingImageGenerationClient: "griptape-tools/official-tools/inpainting-image-generation-client.md" - - OutpaintingImageGenerationClient: "griptape-tools/official-tools/outpainting-image-generation-client.md" - - ImageQueryClient: "griptape-tools/official-tools/image-query-client.md" - - TextToSpeechClient: "griptape-tools/official-tools/text-to-speech-client.md" - - AudioTranscriptionClient: "griptape-tools/official-tools/audio-transcription-client.md" - - GriptapeCloudKnowledgeBaseClient: "griptape-tools/official-tools/griptape-cloud-knowledge-base-client.md" - - RagClient: "griptape-tools/official-tools/rag-client.md" + - Aws Iam: "griptape-tools/official-tools/aws-iam-tool.md" + - Aws S3: "griptape-tools/official-tools/aws-s3-tool.md" + - Calculator: "griptape-tools/official-tools/calculator-tool.md" + - Computer: "griptape-tools/official-tools/computer-tool.md" + - Date Time: "griptape-tools/official-tools/date-time-tool.md" + - Email: "griptape-tools/official-tools/email-tool.md" + - File Manager: "griptape-tools/official-tools/file-manager-tool.md" + - Google Calendar: "griptape-tools/official-tools/google-calendar-tool.md" + - Google Gmail: "griptape-tools/official-tools/google-gmail-tool.md" + - Google Drive: "griptape-tools/official-tools/google-drive-tool.md" + - Google Docs: "griptape-tools/official-tools/google-docs-tool.md" + - Structure Run Client: "griptape-tools/official-tools/structure-run-tool.md" + - Open Weather: "griptape-tools/official-tools/openweather-tool.md" + - Rest Api Client: "griptape-tools/official-tools/rest-api-tool.md" + - Sql: "griptape-tools/official-tools/sql-tool.md" + - Vector Store Tool: "griptape-tools/official-tools/vector-store-tool.md" + - Web Scraper: "griptape-tools/official-tools/web-scraper-tool.md" + - Web Search: "griptape-tools/official-tools/web-search-tool.md" + - Prompt Image Generation: "griptape-tools/official-tools/prompt-image-generation-tool.md" + - Variation ImageGeneration: "griptape-tools/official-tools/variation-image-generation-tool.md" + - Inpainting ImageGeneration: "griptape-tools/official-tools/inpainting-image-generation-tool.md" + - Outpainting ImageGeneration: "griptape-tools/official-tools/outpainting-image-generation-tool.md" + - Image Query: "griptape-tools/official-tools/image-query-tool.md" + - Text To Speech: "griptape-tools/official-tools/text-to-speech-tool.md" + - Audio Transcription: "griptape-tools/official-tools/audio-transcription-tool.md" + - Griptape Cloud Knowledge Base: "griptape-tools/official-tools/griptape-cloud-knowledge-base-tool.md" + - Rag: "griptape-tools/official-tools/rag-tool.md" + - Extraction: "griptape-tools/official-tools/extraction-tool.md" + - Query: "griptape-tools/official-tools/query-tool.md" + - Prompt Summary: "griptape-tools/official-tools/prompt-summary-tool.md" - Custom Tools: - Building Custom Tools: "griptape-tools/custom-tools/index.md" - Recipes: @@ -173,5 +175,6 @@ nav: - Load and Query Pinecone: "examples/load-and-query-pinecone.md" - Load and Query Marqo: "examples/load-query-and-chat-marqo.md" - Query a Webpage: 
"examples/query-webpage.md" + - RAG with Astra DB vector store: "examples/query-webpage-astra-db.md" - Reference Guide: "reference/" - Trade School: "https://learn.griptape.ai" diff --git a/poetry.lock b/poetry.lock index 73f49743c..32c3964ef 100644 --- a/poetry.lock +++ b/poetry.lock @@ -32,91 +32,103 @@ test-trackers = ["comet-ml", "dvclive", "tensorboard", "wandb"] testing = ["bitsandbytes", "datasets", "diffusers", "evaluate", "parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist", "scikit-learn", "scipy", "timm", "torchpippy (>=0.2.0)", "tqdm", "transformers"] [[package]] -name = "aiohttp" -version = "3.9.5" -description = "Async http client/server framework (asyncio)" +name = "aiohappyeyeballs" +version = "2.3.5" +description = "Happy Eyeballs for asyncio" optional = true python-versions = ">=3.8" files = [ - {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fcde4c397f673fdec23e6b05ebf8d4751314fa7c24f93334bf1f1364c1c69ac7"}, - {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d6b3f1fabe465e819aed2c421a6743d8debbde79b6a8600739300630a01bf2c"}, - {file = "aiohttp-3.9.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ae79c1bc12c34082d92bf9422764f799aee4746fd7a392db46b7fd357d4a17a"}, - {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d3ebb9e1316ec74277d19c5f482f98cc65a73ccd5430540d6d11682cd857430"}, - {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84dabd95154f43a2ea80deffec9cb44d2e301e38a0c9d331cc4aa0166fe28ae3"}, - {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a02fbeca6f63cb1f0475c799679057fc9268b77075ab7cf3f1c600e81dd46b"}, - {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c26959ca7b75ff768e2776d8055bf9582a6267e24556bb7f7bd29e677932be72"}, - {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:714d4e5231fed4ba2762ed489b4aec07b2b9953cf4ee31e9871caac895a839c0"}, - {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7a6a8354f1b62e15d48e04350f13e726fa08b62c3d7b8401c0a1314f02e3558"}, - {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c413016880e03e69d166efb5a1a95d40f83d5a3a648d16486592c49ffb76d0db"}, - {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ff84aeb864e0fac81f676be9f4685f0527b660f1efdc40dcede3c251ef1e867f"}, - {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ad7f2919d7dac062f24d6f5fe95d401597fbb015a25771f85e692d043c9d7832"}, - {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:702e2c7c187c1a498a4e2b03155d52658fdd6fda882d3d7fbb891a5cf108bb10"}, - {file = "aiohttp-3.9.5-cp310-cp310-win32.whl", hash = "sha256:67c3119f5ddc7261d47163ed86d760ddf0e625cd6246b4ed852e82159617b5fb"}, - {file = "aiohttp-3.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:471f0ef53ccedec9995287f02caf0c068732f026455f07db3f01a46e49d76bbb"}, - {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e0ae53e33ee7476dd3d1132f932eeb39bf6125083820049d06edcdca4381f342"}, - {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c088c4d70d21f8ca5c0b8b5403fe84a7bc8e024161febdd4ef04575ef35d474d"}, - {file = "aiohttp-3.9.5-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:639d0042b7670222f33b0028de6b4e2fad6451462ce7df2af8aee37dcac55424"}, - {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f26383adb94da5e7fb388d441bf09c61e5e35f455a3217bfd790c6b6bc64b2ee"}, - {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66331d00fb28dc90aa606d9a54304af76b335ae204d1836f65797d6fe27f1ca2"}, - {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ff550491f5492ab5ed3533e76b8567f4b37bd2995e780a1f46bca2024223233"}, - {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f22eb3a6c1080d862befa0a89c380b4dafce29dc6cd56083f630073d102eb595"}, - {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a81b1143d42b66ffc40a441379387076243ef7b51019204fd3ec36b9f69e77d6"}, - {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f64fd07515dad67f24b6ea4a66ae2876c01031de91c93075b8093f07c0a2d93d"}, - {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:93e22add827447d2e26d67c9ac0161756007f152fdc5210277d00a85f6c92323"}, - {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:55b39c8684a46e56ef8c8d24faf02de4a2b2ac60d26cee93bc595651ff545de9"}, - {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4715a9b778f4293b9f8ae7a0a7cef9829f02ff8d6277a39d7f40565c737d3771"}, - {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:afc52b8d969eff14e069a710057d15ab9ac17cd4b6753042c407dcea0e40bf75"}, - {file = "aiohttp-3.9.5-cp311-cp311-win32.whl", hash = "sha256:b3df71da99c98534be076196791adca8819761f0bf6e08e07fd7da25127150d6"}, - {file = "aiohttp-3.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:88e311d98cc0bf45b62fc46c66753a83445f5ab20038bcc1b8a1cc05666f428a"}, - {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:c7a4b7a6cf5b6eb11e109a9755fd4fda7d57395f8c575e166d363b9fc3ec4678"}, - {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0a158704edf0abcac8ac371fbb54044f3270bdbc93e254a82b6c82be1ef08f3c"}, - {file = "aiohttp-3.9.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d153f652a687a8e95ad367a86a61e8d53d528b0530ef382ec5aaf533140ed00f"}, - {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82a6a97d9771cb48ae16979c3a3a9a18b600a8505b1115cfe354dfb2054468b4"}, - {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60cdbd56f4cad9f69c35eaac0fbbdf1f77b0ff9456cebd4902f3dd1cf096464c"}, - {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8676e8fd73141ded15ea586de0b7cda1542960a7b9ad89b2b06428e97125d4fa"}, - {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da00da442a0e31f1c69d26d224e1efd3a1ca5bcbf210978a2ca7426dfcae9f58"}, - {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18f634d540dd099c262e9f887c8bbacc959847cfe5da7a0e2e1cf3f14dbf2daf"}, - {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:320e8618eda64e19d11bdb3bd04ccc0a816c17eaecb7e4945d01deee2a22f95f"}, - {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_i686.whl", hash = 
"sha256:2faa61a904b83142747fc6a6d7ad8fccff898c849123030f8e75d5d967fd4a81"}, - {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:8c64a6dc3fe5db7b1b4d2b5cb84c4f677768bdc340611eca673afb7cf416ef5a"}, - {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:393c7aba2b55559ef7ab791c94b44f7482a07bf7640d17b341b79081f5e5cd1a"}, - {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c671dc117c2c21a1ca10c116cfcd6e3e44da7fcde37bf83b2be485ab377b25da"}, - {file = "aiohttp-3.9.5-cp312-cp312-win32.whl", hash = "sha256:5a7ee16aab26e76add4afc45e8f8206c95d1d75540f1039b84a03c3b3800dd59"}, - {file = "aiohttp-3.9.5-cp312-cp312-win_amd64.whl", hash = "sha256:5ca51eadbd67045396bc92a4345d1790b7301c14d1848feaac1d6a6c9289e888"}, - {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:694d828b5c41255e54bc2dddb51a9f5150b4eefa9886e38b52605a05d96566e8"}, - {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0605cc2c0088fcaae79f01c913a38611ad09ba68ff482402d3410bf59039bfb8"}, - {file = "aiohttp-3.9.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4558e5012ee03d2638c681e156461d37b7a113fe13970d438d95d10173d25f78"}, - {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dbc053ac75ccc63dc3a3cc547b98c7258ec35a215a92bd9f983e0aac95d3d5b"}, - {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4109adee842b90671f1b689901b948f347325045c15f46b39797ae1bf17019de"}, - {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6ea1a5b409a85477fd8e5ee6ad8f0e40bf2844c270955e09360418cfd09abac"}, - {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3c2890ca8c59ee683fd09adf32321a40fe1cf164e3387799efb2acebf090c11"}, - {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3916c8692dbd9d55c523374a3b8213e628424d19116ac4308e434dbf6d95bbdd"}, - {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8d1964eb7617907c792ca00b341b5ec3e01ae8c280825deadbbd678447b127e1"}, - {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5ab8e1f6bee051a4bf6195e38a5c13e5e161cb7bad83d8854524798bd9fcd6e"}, - {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:52c27110f3862a1afbcb2af4281fc9fdc40327fa286c4625dfee247c3ba90156"}, - {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:7f64cbd44443e80094309875d4f9c71d0401e966d191c3d469cde4642bc2e031"}, - {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b4f72fbb66279624bfe83fd5eb6aea0022dad8eec62b71e7bf63ee1caadeafe"}, - {file = "aiohttp-3.9.5-cp38-cp38-win32.whl", hash = "sha256:6380c039ec52866c06d69b5c7aad5478b24ed11696f0e72f6b807cfb261453da"}, - {file = "aiohttp-3.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:da22dab31d7180f8c3ac7c7635f3bcd53808f374f6aa333fe0b0b9e14b01f91a"}, - {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1732102949ff6087589408d76cd6dea656b93c896b011ecafff418c9661dc4ed"}, - {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c6021d296318cb6f9414b48e6a439a7f5d1f665464da507e8ff640848ee2a58a"}, - {file = "aiohttp-3.9.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:239f975589a944eeb1bad26b8b140a59a3a320067fb3cd10b75c3092405a1372"}, - {file = 
"aiohttp-3.9.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b7b30258348082826d274504fbc7c849959f1989d86c29bc355107accec6cfb"}, - {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2adf5c87ff6d8b277814a28a535b59e20bfea40a101db6b3bdca7e9926bc24"}, - {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a3d838441bebcf5cf442700e3963f58b5c33f015341f9ea86dcd7d503c07e2"}, - {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e3a1ae66e3d0c17cf65c08968a5ee3180c5a95920ec2731f53343fac9bad106"}, - {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9c69e77370cce2d6df5d12b4e12bdcca60c47ba13d1cbbc8645dd005a20b738b"}, - {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf56238f4bbf49dab8c2dc2e6b1b68502b1e88d335bea59b3f5b9f4c001475"}, - {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d1469f228cd9ffddd396d9948b8c9cd8022b6d1bf1e40c6f25b0fb90b4f893ed"}, - {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:45731330e754f5811c314901cebdf19dd776a44b31927fa4b4dbecab9e457b0c"}, - {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:3fcb4046d2904378e3aeea1df51f697b0467f2aac55d232c87ba162709478c46"}, - {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8cf142aa6c1a751fcb364158fd710b8a9be874b81889c2bd13aa8893197455e2"}, - {file = "aiohttp-3.9.5-cp39-cp39-win32.whl", hash = "sha256:7b179eea70833c8dee51ec42f3b4097bd6370892fa93f510f76762105568cf09"}, - {file = "aiohttp-3.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:38d80498e2e169bc61418ff36170e0aad0cd268da8b38a17c4cf29d254a8b3f1"}, - {file = "aiohttp-3.9.5.tar.gz", hash = "sha256:edea7d15772ceeb29db4aff55e482d4bcfb6ae160ce144f2682de02f6d693551"}, + {file = "aiohappyeyeballs-2.3.5-py3-none-any.whl", hash = "sha256:4d6dea59215537dbc746e93e779caea8178c866856a721c9c660d7a5a7b8be03"}, + {file = "aiohappyeyeballs-2.3.5.tar.gz", hash = "sha256:6fa48b9f1317254f122a07a131a86b71ca6946ca989ce6326fff54a99a920105"}, ] -[package.dependencies] +[[package]] +name = "aiohttp" +version = "3.10.3" +description = "Async http client/server framework (asyncio)" +optional = true +python-versions = ">=3.8" +files = [ + {file = "aiohttp-3.10.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cc36cbdedf6f259371dbbbcaae5bb0e95b879bc501668ab6306af867577eb5db"}, + {file = "aiohttp-3.10.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:85466b5a695c2a7db13eb2c200af552d13e6a9313d7fa92e4ffe04a2c0ea74c1"}, + {file = "aiohttp-3.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:71bb1d97bfe7e6726267cea169fdf5df7658831bb68ec02c9c6b9f3511e108bb"}, + {file = "aiohttp-3.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baec1eb274f78b2de54471fc4c69ecbea4275965eab4b556ef7a7698dee18bf2"}, + {file = "aiohttp-3.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:13031e7ec1188274bad243255c328cc3019e36a5a907978501256000d57a7201"}, + {file = "aiohttp-3.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2bbc55a964b8eecb341e492ae91c3bd0848324d313e1e71a27e3d96e6ee7e8e8"}, + {file = "aiohttp-3.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8cc0564b286b625e673a2615ede60a1704d0cbbf1b24604e28c31ed37dc62aa"}, + {file = 
"aiohttp-3.10.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f817a54059a4cfbc385a7f51696359c642088710e731e8df80d0607193ed2b73"}, + {file = "aiohttp-3.10.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8542c9e5bcb2bd3115acdf5adc41cda394e7360916197805e7e32b93d821ef93"}, + {file = "aiohttp-3.10.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:671efce3a4a0281060edf9a07a2f7e6230dca3a1cbc61d110eee7753d28405f7"}, + {file = "aiohttp-3.10.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:0974f3b5b0132edcec92c3306f858ad4356a63d26b18021d859c9927616ebf27"}, + {file = "aiohttp-3.10.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:44bb159b55926b57812dca1b21c34528e800963ffe130d08b049b2d6b994ada7"}, + {file = "aiohttp-3.10.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6ae9ae382d1c9617a91647575255ad55a48bfdde34cc2185dd558ce476bf16e9"}, + {file = "aiohttp-3.10.3-cp310-cp310-win32.whl", hash = "sha256:aed12a54d4e1ee647376fa541e1b7621505001f9f939debf51397b9329fd88b9"}, + {file = "aiohttp-3.10.3-cp310-cp310-win_amd64.whl", hash = "sha256:b51aef59370baf7444de1572f7830f59ddbabd04e5292fa4218d02f085f8d299"}, + {file = "aiohttp-3.10.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e021c4c778644e8cdc09487d65564265e6b149896a17d7c0f52e9a088cc44e1b"}, + {file = "aiohttp-3.10.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:24fade6dae446b183e2410a8628b80df9b7a42205c6bfc2eff783cbeedc224a2"}, + {file = "aiohttp-3.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bc8e9f15939dacb0e1f2d15f9c41b786051c10472c7a926f5771e99b49a5957f"}, + {file = "aiohttp-3.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5a9ec959b5381271c8ec9310aae1713b2aec29efa32e232e5ef7dcca0df0279"}, + {file = "aiohttp-3.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a5d0ea8a6467b15d53b00c4e8ea8811e47c3cc1bdbc62b1aceb3076403d551f"}, + {file = "aiohttp-3.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c9ed607dbbdd0d4d39b597e5bf6b0d40d844dfb0ac6a123ed79042ef08c1f87e"}, + {file = "aiohttp-3.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3e66d5b506832e56add66af88c288c1d5ba0c38b535a1a59e436b300b57b23e"}, + {file = "aiohttp-3.10.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fda91ad797e4914cca0afa8b6cccd5d2b3569ccc88731be202f6adce39503189"}, + {file = "aiohttp-3.10.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:61ccb867b2f2f53df6598eb2a93329b5eee0b00646ee79ea67d68844747a418e"}, + {file = "aiohttp-3.10.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6d881353264e6156f215b3cb778c9ac3184f5465c2ece5e6fce82e68946868ef"}, + {file = "aiohttp-3.10.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:b031ce229114825f49cec4434fa844ccb5225e266c3e146cb4bdd025a6da52f1"}, + {file = "aiohttp-3.10.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5337cc742a03f9e3213b097abff8781f79de7190bbfaa987bd2b7ceb5bb0bdec"}, + {file = "aiohttp-3.10.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ab3361159fd3dcd0e48bbe804006d5cfb074b382666e6c064112056eb234f1a9"}, + {file = "aiohttp-3.10.3-cp311-cp311-win32.whl", hash = "sha256:05d66203a530209cbe40f102ebaac0b2214aba2a33c075d0bf825987c36f1f0b"}, + {file = "aiohttp-3.10.3-cp311-cp311-win_amd64.whl", hash = "sha256:70b4a4984a70a2322b70e088d654528129783ac1ebbf7dd76627b3bd22db2f17"}, + {file = 
"aiohttp-3.10.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:166de65e2e4e63357cfa8417cf952a519ac42f1654cb2d43ed76899e2319b1ee"}, + {file = "aiohttp-3.10.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7084876352ba3833d5d214e02b32d794e3fd9cf21fdba99cff5acabeb90d9806"}, + {file = "aiohttp-3.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d98c604c93403288591d7d6d7d6cc8a63459168f8846aeffd5b3a7f3b3e5e09"}, + {file = "aiohttp-3.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d73b073a25a0bb8bf014345374fe2d0f63681ab5da4c22f9d2025ca3e3ea54fc"}, + {file = "aiohttp-3.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8da6b48c20ce78f5721068f383e0e113dde034e868f1b2f5ee7cb1e95f91db57"}, + {file = "aiohttp-3.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3a9dcdccf50284b1b0dc72bc57e5bbd3cc9bf019060dfa0668f63241ccc16aa7"}, + {file = "aiohttp-3.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56fb94bae2be58f68d000d046172d8b8e6b1b571eb02ceee5535e9633dcd559c"}, + {file = "aiohttp-3.10.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bf75716377aad2c718cdf66451c5cf02042085d84522aec1f9246d3e4b8641a6"}, + {file = "aiohttp-3.10.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6c51ed03e19c885c8e91f574e4bbe7381793f56f93229731597e4a499ffef2a5"}, + {file = "aiohttp-3.10.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b84857b66fa6510a163bb083c1199d1ee091a40163cfcbbd0642495fed096204"}, + {file = "aiohttp-3.10.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c124b9206b1befe0491f48185fd30a0dd51b0f4e0e7e43ac1236066215aff272"}, + {file = "aiohttp-3.10.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3461d9294941937f07bbbaa6227ba799bc71cc3b22c40222568dc1cca5118f68"}, + {file = "aiohttp-3.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:08bd0754d257b2db27d6bab208c74601df6f21bfe4cb2ec7b258ba691aac64b3"}, + {file = "aiohttp-3.10.3-cp312-cp312-win32.whl", hash = "sha256:7f9159ae530297f61a00116771e57516f89a3de6ba33f314402e41560872b50a"}, + {file = "aiohttp-3.10.3-cp312-cp312-win_amd64.whl", hash = "sha256:e1128c5d3a466279cb23c4aa32a0f6cb0e7d2961e74e9e421f90e74f75ec1edf"}, + {file = "aiohttp-3.10.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d1100e68e70eb72eadba2b932b185ebf0f28fd2f0dbfe576cfa9d9894ef49752"}, + {file = "aiohttp-3.10.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a541414578ff47c0a9b0b8b77381ea86b0c8531ab37fc587572cb662ccd80b88"}, + {file = "aiohttp-3.10.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d5548444ef60bf4c7b19ace21f032fa42d822e516a6940d36579f7bfa8513f9c"}, + {file = "aiohttp-3.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ba2e838b5e6a8755ac8297275c9460e729dc1522b6454aee1766c6de6d56e5e"}, + {file = "aiohttp-3.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:48665433bb59144aaf502c324694bec25867eb6630fcd831f7a893ca473fcde4"}, + {file = "aiohttp-3.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bac352fceed158620ce2d701ad39d4c1c76d114255a7c530e057e2b9f55bdf9f"}, + {file = "aiohttp-3.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b0f670502100cdc567188c49415bebba947eb3edaa2028e1a50dd81bd13363f"}, + {file = "aiohttp-3.10.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:43b09f38a67679e32d380fe512189ccb0b25e15afc79b23fbd5b5e48e4fc8fd9"}, + {file = "aiohttp-3.10.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:cd788602e239ace64f257d1c9d39898ca65525583f0fbf0988bcba19418fe93f"}, + {file = "aiohttp-3.10.3-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:214277dcb07ab3875f17ee1c777d446dcce75bea85846849cc9d139ab8f5081f"}, + {file = "aiohttp-3.10.3-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:32007fdcaab789689c2ecaaf4b71f8e37bf012a15cd02c0a9db8c4d0e7989fa8"}, + {file = "aiohttp-3.10.3-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:123e5819bfe1b87204575515cf448ab3bf1489cdeb3b61012bde716cda5853e7"}, + {file = "aiohttp-3.10.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:812121a201f0c02491a5db335a737b4113151926a79ae9ed1a9f41ea225c0e3f"}, + {file = "aiohttp-3.10.3-cp38-cp38-win32.whl", hash = "sha256:b97dc9a17a59f350c0caa453a3cb35671a2ffa3a29a6ef3568b523b9113d84e5"}, + {file = "aiohttp-3.10.3-cp38-cp38-win_amd64.whl", hash = "sha256:3731a73ddc26969d65f90471c635abd4e1546a25299b687e654ea6d2fc052394"}, + {file = "aiohttp-3.10.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38d91b98b4320ffe66efa56cb0f614a05af53b675ce1b8607cdb2ac826a8d58e"}, + {file = "aiohttp-3.10.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9743fa34a10a36ddd448bba8a3adc2a66a1c575c3c2940301bacd6cc896c6bf1"}, + {file = "aiohttp-3.10.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7c126f532caf238031c19d169cfae3c6a59129452c990a6e84d6e7b198a001dc"}, + {file = "aiohttp-3.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:926e68438f05703e500b06fe7148ef3013dd6f276de65c68558fa9974eeb59ad"}, + {file = "aiohttp-3.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:434b3ab75833accd0b931d11874e206e816f6e6626fd69f643d6a8269cd9166a"}, + {file = "aiohttp-3.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d35235a44ec38109b811c3600d15d8383297a8fab8e3dec6147477ec8636712a"}, + {file = "aiohttp-3.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59c489661edbd863edb30a8bd69ecb044bd381d1818022bc698ba1b6f80e5dd1"}, + {file = "aiohttp-3.10.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50544fe498c81cb98912afabfc4e4d9d85e89f86238348e3712f7ca6a2f01dab"}, + {file = "aiohttp-3.10.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:09bc79275737d4dc066e0ae2951866bb36d9c6b460cb7564f111cc0427f14844"}, + {file = "aiohttp-3.10.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:af4dbec58e37f5afff4f91cdf235e8e4b0bd0127a2a4fd1040e2cad3369d2f06"}, + {file = "aiohttp-3.10.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b22cae3c9dd55a6b4c48c63081d31c00fc11fa9db1a20c8a50ee38c1a29539d2"}, + {file = "aiohttp-3.10.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ba562736d3fbfe9241dad46c1a8994478d4a0e50796d80e29d50cabe8fbfcc3f"}, + {file = "aiohttp-3.10.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f25d6c4e82d7489be84f2b1c8212fafc021b3731abdb61a563c90e37cced3a21"}, + {file = "aiohttp-3.10.3-cp39-cp39-win32.whl", hash = "sha256:b69d832e5f5fa15b1b6b2c8eb6a9fd2c0ec1fd7729cb4322ed27771afc9fc2ac"}, + {file = "aiohttp-3.10.3-cp39-cp39-win_amd64.whl", hash = "sha256:673bb6e3249dc8825df1105f6ef74e2eab779b7ff78e96c15cadb78b04a83752"}, + {file = "aiohttp-3.10.3.tar.gz", hash = "sha256:21650e7032cc2d31fc23d353d7123e771354f2a3d5b05a5647fc30fea214e696"}, +] + +[package.dependencies] +aiohappyeyeballs = ">=2.3.0" 
aiosignal = ">=1.1.2" async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""} attrs = ">=17.3.0" @@ -125,7 +137,7 @@ multidict = ">=4.5,<7.0" yarl = ">=1.0,<2.0" [package.extras] -speedups = ["Brotli", "aiodns", "brotlicffi"] +speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] [[package]] name = "aiolimiter" @@ -221,6 +233,25 @@ files = [ {file = "asn1crypto-1.5.1.tar.gz", hash = "sha256:13ae38502be632115abf8a24cbe5f4da52e3b5231990aff31123c805306ccb9c"}, ] +[[package]] +name = "astrapy" +version = "1.4.1" +description = "AstraPy is a Pythonic SDK for DataStax Astra and its Data API" +optional = true +python-versions = "<4.0.0,>=3.8.0" +files = [ + {file = "astrapy-1.4.1-py3-none-any.whl", hash = "sha256:f2f6ca3a19cfab9422f306b3941401079fb940e286f3d17c776b71ff76eb9f73"}, + {file = "astrapy-1.4.1.tar.gz", hash = "sha256:ea4ed0ec44f9d7281d034c9bd829b0db844438424d492c9c27136456d1a82719"}, +] + +[package.dependencies] +cassio = ">=0.1.4,<0.2.0" +deprecation = ">=2.1.0,<2.2.0" +httpx = {version = ">=0.25.2,<1", extras = ["http2"]} +pymongo = ">=3" +toml = ">=0.10.2,<0.11.0" +uuid6 = ">=2024.1.12,<2024.2.0" + [[package]] name = "async-timeout" version = "4.0.3" @@ -253,29 +284,18 @@ tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "p [[package]] name = "babel" -version = "2.15.0" +version = "2.16.0" description = "Internationalization utilities" optional = false python-versions = ">=3.8" files = [ - {file = "Babel-2.15.0-py3-none-any.whl", hash = "sha256:08706bdad8d0a3413266ab61bd6c34d0c28d6e1e7badf40a2cebe67644e2e1fb"}, - {file = "babel-2.15.0.tar.gz", hash = "sha256:8daf0e265d05768bc6c7a314cf1321e9a123afc328cc635c18622a2f30a04413"}, + {file = "babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b"}, + {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"}, ] [package.extras] dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] -[[package]] -name = "backports-strenum" -version = "1.3.1" -description = "Base class for creating enumerated constants that are also subclasses of str" -optional = false -python-versions = ">=3.8.6,<3.11" -files = [ - {file = "backports_strenum-1.3.1-py3-none-any.whl", hash = "sha256:cdcfe36dc897e2615dc793b7d3097f54d359918fc448754a517e6f23044ccf83"}, - {file = "backports_strenum-1.3.1.tar.gz", hash = "sha256:77c52407342898497714f0596e86188bb7084f89063226f4ba66863482f42414"}, -] - [[package]] name = "backports-tarfile" version = "1.2.0" @@ -314,17 +334,17 @@ lxml = ["lxml"] [[package]] name = "boto3" -version = "1.34.146" +version = "1.34.161" description = "The AWS SDK for Python" optional = false python-versions = ">=3.8" files = [ - {file = "boto3-1.34.146-py3-none-any.whl", hash = "sha256:7ec568fb19bce82a70be51f08fddac1ef927ca3fb0896cbb34303a012ba228d8"}, - {file = "boto3-1.34.146.tar.gz", hash = "sha256:5686fe2a6d1aa1de8a88e9589cdcc33361640d3d7a13da718a30717248886124"}, + {file = "boto3-1.34.161-py3-none-any.whl", hash = "sha256:4ef285334a0edc3047e27a04caf00f7742e32c0f03a361101e768014ac5709dd"}, + {file = "boto3-1.34.161.tar.gz", hash = "sha256:a872d8fdb3203c1eb0b12fa9e9d879e6f7fd02983a485f02189e6d5914ccd834"}, ] [package.dependencies] -botocore = ">=1.34.146,<1.35.0" +botocore = ">=1.34.161,<1.35.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.10.0,<0.11.0" @@ -333,13 +353,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "boto3-stubs" -version = "1.34.146" 
-description = "Type annotations for boto3 1.34.146 generated with mypy-boto3-builder 7.25.0" +version = "1.34.161" +description = "Type annotations for boto3 1.34.161 generated with mypy-boto3-builder 7.26.0" optional = false python-versions = ">=3.8" files = [ - {file = "boto3_stubs-1.34.146-py3-none-any.whl", hash = "sha256:904918cb9d20f0d3744a7ed0863090fef8c1bd82d3bdcafbc94310e5cb36daf7"}, - {file = "boto3_stubs-1.34.146.tar.gz", hash = "sha256:5e4ccbedb7534fca303d5b8c2f1534a9bcd097c686f1f5a9ed82ae7f259bb8d1"}, + {file = "boto3_stubs-1.34.161-py3-none-any.whl", hash = "sha256:aff48426ed2dc03be038a1b8d0864f8ff1d6d7e0e024f9bd69ec8d0c78b4cf4c"}, + {file = "boto3_stubs-1.34.161.tar.gz", hash = "sha256:58291c105030ab589cf30c02dfb48df1e23cbabdc6e9e0fc3446982a83280edc"}, ] [package.dependencies] @@ -357,7 +377,7 @@ accessanalyzer = ["mypy-boto3-accessanalyzer (>=1.34.0,<1.35.0)"] account = ["mypy-boto3-account (>=1.34.0,<1.35.0)"] acm = ["mypy-boto3-acm (>=1.34.0,<1.35.0)"] acm-pca = ["mypy-boto3-acm-pca (>=1.34.0,<1.35.0)"] -all = ["mypy-boto3-accessanalyzer (>=1.34.0,<1.35.0)", "mypy-boto3-account (>=1.34.0,<1.35.0)", "mypy-boto3-acm (>=1.34.0,<1.35.0)", "mypy-boto3-acm-pca (>=1.34.0,<1.35.0)", "mypy-boto3-amp (>=1.34.0,<1.35.0)", "mypy-boto3-amplify (>=1.34.0,<1.35.0)", "mypy-boto3-amplifybackend (>=1.34.0,<1.35.0)", "mypy-boto3-amplifyuibuilder (>=1.34.0,<1.35.0)", "mypy-boto3-apigateway (>=1.34.0,<1.35.0)", "mypy-boto3-apigatewaymanagementapi (>=1.34.0,<1.35.0)", "mypy-boto3-apigatewayv2 (>=1.34.0,<1.35.0)", "mypy-boto3-appconfig (>=1.34.0,<1.35.0)", "mypy-boto3-appconfigdata (>=1.34.0,<1.35.0)", "mypy-boto3-appfabric (>=1.34.0,<1.35.0)", "mypy-boto3-appflow (>=1.34.0,<1.35.0)", "mypy-boto3-appintegrations (>=1.34.0,<1.35.0)", "mypy-boto3-application-autoscaling (>=1.34.0,<1.35.0)", "mypy-boto3-application-insights (>=1.34.0,<1.35.0)", "mypy-boto3-application-signals (>=1.34.0,<1.35.0)", "mypy-boto3-applicationcostprofiler (>=1.34.0,<1.35.0)", "mypy-boto3-appmesh (>=1.34.0,<1.35.0)", "mypy-boto3-apprunner (>=1.34.0,<1.35.0)", "mypy-boto3-appstream (>=1.34.0,<1.35.0)", "mypy-boto3-appsync (>=1.34.0,<1.35.0)", "mypy-boto3-apptest (>=1.34.0,<1.35.0)", "mypy-boto3-arc-zonal-shift (>=1.34.0,<1.35.0)", "mypy-boto3-artifact (>=1.34.0,<1.35.0)", "mypy-boto3-athena (>=1.34.0,<1.35.0)", "mypy-boto3-auditmanager (>=1.34.0,<1.35.0)", "mypy-boto3-autoscaling (>=1.34.0,<1.35.0)", "mypy-boto3-autoscaling-plans (>=1.34.0,<1.35.0)", "mypy-boto3-b2bi (>=1.34.0,<1.35.0)", "mypy-boto3-backup (>=1.34.0,<1.35.0)", "mypy-boto3-backup-gateway (>=1.34.0,<1.35.0)", "mypy-boto3-batch (>=1.34.0,<1.35.0)", "mypy-boto3-bcm-data-exports (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-agent (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-agent-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-billingconductor (>=1.34.0,<1.35.0)", "mypy-boto3-braket (>=1.34.0,<1.35.0)", "mypy-boto3-budgets (>=1.34.0,<1.35.0)", "mypy-boto3-ce (>=1.34.0,<1.35.0)", "mypy-boto3-chatbot (>=1.34.0,<1.35.0)", "mypy-boto3-chime (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-identity (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-media-pipelines (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-meetings (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-messaging (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-voice (>=1.34.0,<1.35.0)", "mypy-boto3-cleanrooms (>=1.34.0,<1.35.0)", "mypy-boto3-cleanroomsml (>=1.34.0,<1.35.0)", "mypy-boto3-cloud9 (>=1.34.0,<1.35.0)", "mypy-boto3-cloudcontrol 
(>=1.34.0,<1.35.0)", "mypy-boto3-clouddirectory (>=1.34.0,<1.35.0)", "mypy-boto3-cloudformation (>=1.34.0,<1.35.0)", "mypy-boto3-cloudfront (>=1.34.0,<1.35.0)", "mypy-boto3-cloudfront-keyvaluestore (>=1.34.0,<1.35.0)", "mypy-boto3-cloudhsm (>=1.34.0,<1.35.0)", "mypy-boto3-cloudhsmv2 (>=1.34.0,<1.35.0)", "mypy-boto3-cloudsearch (>=1.34.0,<1.35.0)", "mypy-boto3-cloudsearchdomain (>=1.34.0,<1.35.0)", "mypy-boto3-cloudtrail (>=1.34.0,<1.35.0)", "mypy-boto3-cloudtrail-data (>=1.34.0,<1.35.0)", "mypy-boto3-cloudwatch (>=1.34.0,<1.35.0)", "mypy-boto3-codeartifact (>=1.34.0,<1.35.0)", "mypy-boto3-codebuild (>=1.34.0,<1.35.0)", "mypy-boto3-codecatalyst (>=1.34.0,<1.35.0)", "mypy-boto3-codecommit (>=1.34.0,<1.35.0)", "mypy-boto3-codeconnections (>=1.34.0,<1.35.0)", "mypy-boto3-codedeploy (>=1.34.0,<1.35.0)", "mypy-boto3-codeguru-reviewer (>=1.34.0,<1.35.0)", "mypy-boto3-codeguru-security (>=1.34.0,<1.35.0)", "mypy-boto3-codeguruprofiler (>=1.34.0,<1.35.0)", "mypy-boto3-codepipeline (>=1.34.0,<1.35.0)", "mypy-boto3-codestar (>=1.34.0,<1.35.0)", "mypy-boto3-codestar-connections (>=1.34.0,<1.35.0)", "mypy-boto3-codestar-notifications (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-identity (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-idp (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-sync (>=1.34.0,<1.35.0)", "mypy-boto3-comprehend (>=1.34.0,<1.35.0)", "mypy-boto3-comprehendmedical (>=1.34.0,<1.35.0)", "mypy-boto3-compute-optimizer (>=1.34.0,<1.35.0)", "mypy-boto3-config (>=1.34.0,<1.35.0)", "mypy-boto3-connect (>=1.34.0,<1.35.0)", "mypy-boto3-connect-contact-lens (>=1.34.0,<1.35.0)", "mypy-boto3-connectcampaigns (>=1.34.0,<1.35.0)", "mypy-boto3-connectcases (>=1.34.0,<1.35.0)", "mypy-boto3-connectparticipant (>=1.34.0,<1.35.0)", "mypy-boto3-controlcatalog (>=1.34.0,<1.35.0)", "mypy-boto3-controltower (>=1.34.0,<1.35.0)", "mypy-boto3-cost-optimization-hub (>=1.34.0,<1.35.0)", "mypy-boto3-cur (>=1.34.0,<1.35.0)", "mypy-boto3-customer-profiles (>=1.34.0,<1.35.0)", "mypy-boto3-databrew (>=1.34.0,<1.35.0)", "mypy-boto3-dataexchange (>=1.34.0,<1.35.0)", "mypy-boto3-datapipeline (>=1.34.0,<1.35.0)", "mypy-boto3-datasync (>=1.34.0,<1.35.0)", "mypy-boto3-datazone (>=1.34.0,<1.35.0)", "mypy-boto3-dax (>=1.34.0,<1.35.0)", "mypy-boto3-deadline (>=1.34.0,<1.35.0)", "mypy-boto3-detective (>=1.34.0,<1.35.0)", "mypy-boto3-devicefarm (>=1.34.0,<1.35.0)", "mypy-boto3-devops-guru (>=1.34.0,<1.35.0)", "mypy-boto3-directconnect (>=1.34.0,<1.35.0)", "mypy-boto3-discovery (>=1.34.0,<1.35.0)", "mypy-boto3-dlm (>=1.34.0,<1.35.0)", "mypy-boto3-dms (>=1.34.0,<1.35.0)", "mypy-boto3-docdb (>=1.34.0,<1.35.0)", "mypy-boto3-docdb-elastic (>=1.34.0,<1.35.0)", "mypy-boto3-drs (>=1.34.0,<1.35.0)", "mypy-boto3-ds (>=1.34.0,<1.35.0)", "mypy-boto3-dynamodb (>=1.34.0,<1.35.0)", "mypy-boto3-dynamodbstreams (>=1.34.0,<1.35.0)", "mypy-boto3-ebs (>=1.34.0,<1.35.0)", "mypy-boto3-ec2 (>=1.34.0,<1.35.0)", "mypy-boto3-ec2-instance-connect (>=1.34.0,<1.35.0)", "mypy-boto3-ecr (>=1.34.0,<1.35.0)", "mypy-boto3-ecr-public (>=1.34.0,<1.35.0)", "mypy-boto3-ecs (>=1.34.0,<1.35.0)", "mypy-boto3-efs (>=1.34.0,<1.35.0)", "mypy-boto3-eks (>=1.34.0,<1.35.0)", "mypy-boto3-eks-auth (>=1.34.0,<1.35.0)", "mypy-boto3-elastic-inference (>=1.34.0,<1.35.0)", "mypy-boto3-elasticache (>=1.34.0,<1.35.0)", "mypy-boto3-elasticbeanstalk (>=1.34.0,<1.35.0)", "mypy-boto3-elastictranscoder (>=1.34.0,<1.35.0)", "mypy-boto3-elb (>=1.34.0,<1.35.0)", "mypy-boto3-elbv2 (>=1.34.0,<1.35.0)", "mypy-boto3-emr (>=1.34.0,<1.35.0)", "mypy-boto3-emr-containers (>=1.34.0,<1.35.0)", 
"mypy-boto3-emr-serverless (>=1.34.0,<1.35.0)", "mypy-boto3-entityresolution (>=1.34.0,<1.35.0)", "mypy-boto3-es (>=1.34.0,<1.35.0)", "mypy-boto3-events (>=1.34.0,<1.35.0)", "mypy-boto3-evidently (>=1.34.0,<1.35.0)", "mypy-boto3-finspace (>=1.34.0,<1.35.0)", "mypy-boto3-finspace-data (>=1.34.0,<1.35.0)", "mypy-boto3-firehose (>=1.34.0,<1.35.0)", "mypy-boto3-fis (>=1.34.0,<1.35.0)", "mypy-boto3-fms (>=1.34.0,<1.35.0)", "mypy-boto3-forecast (>=1.34.0,<1.35.0)", "mypy-boto3-forecastquery (>=1.34.0,<1.35.0)", "mypy-boto3-frauddetector (>=1.34.0,<1.35.0)", "mypy-boto3-freetier (>=1.34.0,<1.35.0)", "mypy-boto3-fsx (>=1.34.0,<1.35.0)", "mypy-boto3-gamelift (>=1.34.0,<1.35.0)", "mypy-boto3-glacier (>=1.34.0,<1.35.0)", "mypy-boto3-globalaccelerator (>=1.34.0,<1.35.0)", "mypy-boto3-glue (>=1.34.0,<1.35.0)", "mypy-boto3-grafana (>=1.34.0,<1.35.0)", "mypy-boto3-greengrass (>=1.34.0,<1.35.0)", "mypy-boto3-greengrassv2 (>=1.34.0,<1.35.0)", "mypy-boto3-groundstation (>=1.34.0,<1.35.0)", "mypy-boto3-guardduty (>=1.34.0,<1.35.0)", "mypy-boto3-health (>=1.34.0,<1.35.0)", "mypy-boto3-healthlake (>=1.34.0,<1.35.0)", "mypy-boto3-iam (>=1.34.0,<1.35.0)", "mypy-boto3-identitystore (>=1.34.0,<1.35.0)", "mypy-boto3-imagebuilder (>=1.34.0,<1.35.0)", "mypy-boto3-importexport (>=1.34.0,<1.35.0)", "mypy-boto3-inspector (>=1.34.0,<1.35.0)", "mypy-boto3-inspector-scan (>=1.34.0,<1.35.0)", "mypy-boto3-inspector2 (>=1.34.0,<1.35.0)", "mypy-boto3-internetmonitor (>=1.34.0,<1.35.0)", "mypy-boto3-iot (>=1.34.0,<1.35.0)", "mypy-boto3-iot-data (>=1.34.0,<1.35.0)", "mypy-boto3-iot-jobs-data (>=1.34.0,<1.35.0)", "mypy-boto3-iot1click-devices (>=1.34.0,<1.35.0)", "mypy-boto3-iot1click-projects (>=1.34.0,<1.35.0)", "mypy-boto3-iotanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-iotdeviceadvisor (>=1.34.0,<1.35.0)", "mypy-boto3-iotevents (>=1.34.0,<1.35.0)", "mypy-boto3-iotevents-data (>=1.34.0,<1.35.0)", "mypy-boto3-iotfleethub (>=1.34.0,<1.35.0)", "mypy-boto3-iotfleetwise (>=1.34.0,<1.35.0)", "mypy-boto3-iotsecuretunneling (>=1.34.0,<1.35.0)", "mypy-boto3-iotsitewise (>=1.34.0,<1.35.0)", "mypy-boto3-iotthingsgraph (>=1.34.0,<1.35.0)", "mypy-boto3-iottwinmaker (>=1.34.0,<1.35.0)", "mypy-boto3-iotwireless (>=1.34.0,<1.35.0)", "mypy-boto3-ivs (>=1.34.0,<1.35.0)", "mypy-boto3-ivs-realtime (>=1.34.0,<1.35.0)", "mypy-boto3-ivschat (>=1.34.0,<1.35.0)", "mypy-boto3-kafka (>=1.34.0,<1.35.0)", "mypy-boto3-kafkaconnect (>=1.34.0,<1.35.0)", "mypy-boto3-kendra (>=1.34.0,<1.35.0)", "mypy-boto3-kendra-ranking (>=1.34.0,<1.35.0)", "mypy-boto3-keyspaces (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-archived-media (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-media (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-signaling (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-webrtc-storage (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisanalyticsv2 (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisvideo (>=1.34.0,<1.35.0)", "mypy-boto3-kms (>=1.34.0,<1.35.0)", "mypy-boto3-lakeformation (>=1.34.0,<1.35.0)", "mypy-boto3-lambda (>=1.34.0,<1.35.0)", "mypy-boto3-launch-wizard (>=1.34.0,<1.35.0)", "mypy-boto3-lex-models (>=1.34.0,<1.35.0)", "mypy-boto3-lex-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-lexv2-models (>=1.34.0,<1.35.0)", "mypy-boto3-lexv2-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager-linux-subscriptions (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager-user-subscriptions (>=1.34.0,<1.35.0)", "mypy-boto3-lightsail 
(>=1.34.0,<1.35.0)", "mypy-boto3-location (>=1.34.0,<1.35.0)", "mypy-boto3-logs (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutequipment (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutmetrics (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutvision (>=1.34.0,<1.35.0)", "mypy-boto3-m2 (>=1.34.0,<1.35.0)", "mypy-boto3-machinelearning (>=1.34.0,<1.35.0)", "mypy-boto3-macie2 (>=1.34.0,<1.35.0)", "mypy-boto3-mailmanager (>=1.34.0,<1.35.0)", "mypy-boto3-managedblockchain (>=1.34.0,<1.35.0)", "mypy-boto3-managedblockchain-query (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-agreement (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-catalog (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-deployment (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-entitlement (>=1.34.0,<1.35.0)", "mypy-boto3-marketplacecommerceanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-mediaconnect (>=1.34.0,<1.35.0)", "mypy-boto3-mediaconvert (>=1.34.0,<1.35.0)", "mypy-boto3-medialive (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackage (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackage-vod (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackagev2 (>=1.34.0,<1.35.0)", "mypy-boto3-mediastore (>=1.34.0,<1.35.0)", "mypy-boto3-mediastore-data (>=1.34.0,<1.35.0)", "mypy-boto3-mediatailor (>=1.34.0,<1.35.0)", "mypy-boto3-medical-imaging (>=1.34.0,<1.35.0)", "mypy-boto3-memorydb (>=1.34.0,<1.35.0)", "mypy-boto3-meteringmarketplace (>=1.34.0,<1.35.0)", "mypy-boto3-mgh (>=1.34.0,<1.35.0)", "mypy-boto3-mgn (>=1.34.0,<1.35.0)", "mypy-boto3-migration-hub-refactor-spaces (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhub-config (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhuborchestrator (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhubstrategy (>=1.34.0,<1.35.0)", "mypy-boto3-mq (>=1.34.0,<1.35.0)", "mypy-boto3-mturk (>=1.34.0,<1.35.0)", "mypy-boto3-mwaa (>=1.34.0,<1.35.0)", "mypy-boto3-neptune (>=1.34.0,<1.35.0)", "mypy-boto3-neptune-graph (>=1.34.0,<1.35.0)", "mypy-boto3-neptunedata (>=1.34.0,<1.35.0)", "mypy-boto3-network-firewall (>=1.34.0,<1.35.0)", "mypy-boto3-networkmanager (>=1.34.0,<1.35.0)", "mypy-boto3-networkmonitor (>=1.34.0,<1.35.0)", "mypy-boto3-nimble (>=1.34.0,<1.35.0)", "mypy-boto3-oam (>=1.34.0,<1.35.0)", "mypy-boto3-omics (>=1.34.0,<1.35.0)", "mypy-boto3-opensearch (>=1.34.0,<1.35.0)", "mypy-boto3-opensearchserverless (>=1.34.0,<1.35.0)", "mypy-boto3-opsworks (>=1.34.0,<1.35.0)", "mypy-boto3-opsworkscm (>=1.34.0,<1.35.0)", "mypy-boto3-organizations (>=1.34.0,<1.35.0)", "mypy-boto3-osis (>=1.34.0,<1.35.0)", "mypy-boto3-outposts (>=1.34.0,<1.35.0)", "mypy-boto3-panorama (>=1.34.0,<1.35.0)", "mypy-boto3-payment-cryptography (>=1.34.0,<1.35.0)", "mypy-boto3-payment-cryptography-data (>=1.34.0,<1.35.0)", "mypy-boto3-pca-connector-ad (>=1.34.0,<1.35.0)", "mypy-boto3-pca-connector-scep (>=1.34.0,<1.35.0)", "mypy-boto3-personalize (>=1.34.0,<1.35.0)", "mypy-boto3-personalize-events (>=1.34.0,<1.35.0)", "mypy-boto3-personalize-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-pi (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-email (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-sms-voice (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-sms-voice-v2 (>=1.34.0,<1.35.0)", "mypy-boto3-pipes (>=1.34.0,<1.35.0)", "mypy-boto3-polly (>=1.34.0,<1.35.0)", "mypy-boto3-pricing (>=1.34.0,<1.35.0)", "mypy-boto3-privatenetworks (>=1.34.0,<1.35.0)", "mypy-boto3-proton (>=1.34.0,<1.35.0)", "mypy-boto3-qapps (>=1.34.0,<1.35.0)", "mypy-boto3-qbusiness (>=1.34.0,<1.35.0)", "mypy-boto3-qconnect (>=1.34.0,<1.35.0)", "mypy-boto3-qldb (>=1.34.0,<1.35.0)", "mypy-boto3-qldb-session (>=1.34.0,<1.35.0)", 
"mypy-boto3-quicksight (>=1.34.0,<1.35.0)", "mypy-boto3-ram (>=1.34.0,<1.35.0)", "mypy-boto3-rbin (>=1.34.0,<1.35.0)", "mypy-boto3-rds (>=1.34.0,<1.35.0)", "mypy-boto3-rds-data (>=1.34.0,<1.35.0)", "mypy-boto3-redshift (>=1.34.0,<1.35.0)", "mypy-boto3-redshift-data (>=1.34.0,<1.35.0)", "mypy-boto3-redshift-serverless (>=1.34.0,<1.35.0)", "mypy-boto3-rekognition (>=1.34.0,<1.35.0)", "mypy-boto3-repostspace (>=1.34.0,<1.35.0)", "mypy-boto3-resiliencehub (>=1.34.0,<1.35.0)", "mypy-boto3-resource-explorer-2 (>=1.34.0,<1.35.0)", "mypy-boto3-resource-groups (>=1.34.0,<1.35.0)", "mypy-boto3-resourcegroupstaggingapi (>=1.34.0,<1.35.0)", "mypy-boto3-robomaker (>=1.34.0,<1.35.0)", "mypy-boto3-rolesanywhere (>=1.34.0,<1.35.0)", "mypy-boto3-route53 (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-cluster (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-control-config (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-readiness (>=1.34.0,<1.35.0)", "mypy-boto3-route53domains (>=1.34.0,<1.35.0)", "mypy-boto3-route53profiles (>=1.34.0,<1.35.0)", "mypy-boto3-route53resolver (>=1.34.0,<1.35.0)", "mypy-boto3-rum (>=1.34.0,<1.35.0)", "mypy-boto3-s3 (>=1.34.0,<1.35.0)", "mypy-boto3-s3control (>=1.34.0,<1.35.0)", "mypy-boto3-s3outposts (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-a2i-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-edge (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-featurestore-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-geospatial (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-metrics (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-savingsplans (>=1.34.0,<1.35.0)", "mypy-boto3-scheduler (>=1.34.0,<1.35.0)", "mypy-boto3-schemas (>=1.34.0,<1.35.0)", "mypy-boto3-sdb (>=1.34.0,<1.35.0)", "mypy-boto3-secretsmanager (>=1.34.0,<1.35.0)", "mypy-boto3-securityhub (>=1.34.0,<1.35.0)", "mypy-boto3-securitylake (>=1.34.0,<1.35.0)", "mypy-boto3-serverlessrepo (>=1.34.0,<1.35.0)", "mypy-boto3-service-quotas (>=1.34.0,<1.35.0)", "mypy-boto3-servicecatalog (>=1.34.0,<1.35.0)", "mypy-boto3-servicecatalog-appregistry (>=1.34.0,<1.35.0)", "mypy-boto3-servicediscovery (>=1.34.0,<1.35.0)", "mypy-boto3-ses (>=1.34.0,<1.35.0)", "mypy-boto3-sesv2 (>=1.34.0,<1.35.0)", "mypy-boto3-shield (>=1.34.0,<1.35.0)", "mypy-boto3-signer (>=1.34.0,<1.35.0)", "mypy-boto3-simspaceweaver (>=1.34.0,<1.35.0)", "mypy-boto3-sms (>=1.34.0,<1.35.0)", "mypy-boto3-sms-voice (>=1.34.0,<1.35.0)", "mypy-boto3-snow-device-management (>=1.34.0,<1.35.0)", "mypy-boto3-snowball (>=1.34.0,<1.35.0)", "mypy-boto3-sns (>=1.34.0,<1.35.0)", "mypy-boto3-sqs (>=1.34.0,<1.35.0)", "mypy-boto3-ssm (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-contacts (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-incidents (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-sap (>=1.34.0,<1.35.0)", "mypy-boto3-sso (>=1.34.0,<1.35.0)", "mypy-boto3-sso-admin (>=1.34.0,<1.35.0)", "mypy-boto3-sso-oidc (>=1.34.0,<1.35.0)", "mypy-boto3-stepfunctions (>=1.34.0,<1.35.0)", "mypy-boto3-storagegateway (>=1.34.0,<1.35.0)", "mypy-boto3-sts (>=1.34.0,<1.35.0)", "mypy-boto3-supplychain (>=1.34.0,<1.35.0)", "mypy-boto3-support (>=1.34.0,<1.35.0)", "mypy-boto3-support-app (>=1.34.0,<1.35.0)", "mypy-boto3-swf (>=1.34.0,<1.35.0)", "mypy-boto3-synthetics (>=1.34.0,<1.35.0)", "mypy-boto3-taxsettings (>=1.34.0,<1.35.0)", "mypy-boto3-textract (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-influxdb (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-query (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-write (>=1.34.0,<1.35.0)", "mypy-boto3-tnb (>=1.34.0,<1.35.0)", 
"mypy-boto3-transcribe (>=1.34.0,<1.35.0)", "mypy-boto3-transfer (>=1.34.0,<1.35.0)", "mypy-boto3-translate (>=1.34.0,<1.35.0)", "mypy-boto3-trustedadvisor (>=1.34.0,<1.35.0)", "mypy-boto3-verifiedpermissions (>=1.34.0,<1.35.0)", "mypy-boto3-voice-id (>=1.34.0,<1.35.0)", "mypy-boto3-vpc-lattice (>=1.34.0,<1.35.0)", "mypy-boto3-waf (>=1.34.0,<1.35.0)", "mypy-boto3-waf-regional (>=1.34.0,<1.35.0)", "mypy-boto3-wafv2 (>=1.34.0,<1.35.0)", "mypy-boto3-wellarchitected (>=1.34.0,<1.35.0)", "mypy-boto3-wisdom (>=1.34.0,<1.35.0)", "mypy-boto3-workdocs (>=1.34.0,<1.35.0)", "mypy-boto3-worklink (>=1.34.0,<1.35.0)", "mypy-boto3-workmail (>=1.34.0,<1.35.0)", "mypy-boto3-workmailmessageflow (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces-thin-client (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces-web (>=1.34.0,<1.35.0)", "mypy-boto3-xray (>=1.34.0,<1.35.0)"] +all = ["mypy-boto3-accessanalyzer (>=1.34.0,<1.35.0)", "mypy-boto3-account (>=1.34.0,<1.35.0)", "mypy-boto3-acm (>=1.34.0,<1.35.0)", "mypy-boto3-acm-pca (>=1.34.0,<1.35.0)", "mypy-boto3-amp (>=1.34.0,<1.35.0)", "mypy-boto3-amplify (>=1.34.0,<1.35.0)", "mypy-boto3-amplifybackend (>=1.34.0,<1.35.0)", "mypy-boto3-amplifyuibuilder (>=1.34.0,<1.35.0)", "mypy-boto3-apigateway (>=1.34.0,<1.35.0)", "mypy-boto3-apigatewaymanagementapi (>=1.34.0,<1.35.0)", "mypy-boto3-apigatewayv2 (>=1.34.0,<1.35.0)", "mypy-boto3-appconfig (>=1.34.0,<1.35.0)", "mypy-boto3-appconfigdata (>=1.34.0,<1.35.0)", "mypy-boto3-appfabric (>=1.34.0,<1.35.0)", "mypy-boto3-appflow (>=1.34.0,<1.35.0)", "mypy-boto3-appintegrations (>=1.34.0,<1.35.0)", "mypy-boto3-application-autoscaling (>=1.34.0,<1.35.0)", "mypy-boto3-application-insights (>=1.34.0,<1.35.0)", "mypy-boto3-application-signals (>=1.34.0,<1.35.0)", "mypy-boto3-applicationcostprofiler (>=1.34.0,<1.35.0)", "mypy-boto3-appmesh (>=1.34.0,<1.35.0)", "mypy-boto3-apprunner (>=1.34.0,<1.35.0)", "mypy-boto3-appstream (>=1.34.0,<1.35.0)", "mypy-boto3-appsync (>=1.34.0,<1.35.0)", "mypy-boto3-apptest (>=1.34.0,<1.35.0)", "mypy-boto3-arc-zonal-shift (>=1.34.0,<1.35.0)", "mypy-boto3-artifact (>=1.34.0,<1.35.0)", "mypy-boto3-athena (>=1.34.0,<1.35.0)", "mypy-boto3-auditmanager (>=1.34.0,<1.35.0)", "mypy-boto3-autoscaling (>=1.34.0,<1.35.0)", "mypy-boto3-autoscaling-plans (>=1.34.0,<1.35.0)", "mypy-boto3-b2bi (>=1.34.0,<1.35.0)", "mypy-boto3-backup (>=1.34.0,<1.35.0)", "mypy-boto3-backup-gateway (>=1.34.0,<1.35.0)", "mypy-boto3-batch (>=1.34.0,<1.35.0)", "mypy-boto3-bcm-data-exports (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-agent (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-agent-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-billingconductor (>=1.34.0,<1.35.0)", "mypy-boto3-braket (>=1.34.0,<1.35.0)", "mypy-boto3-budgets (>=1.34.0,<1.35.0)", "mypy-boto3-ce (>=1.34.0,<1.35.0)", "mypy-boto3-chatbot (>=1.34.0,<1.35.0)", "mypy-boto3-chime (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-identity (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-media-pipelines (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-meetings (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-messaging (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-voice (>=1.34.0,<1.35.0)", "mypy-boto3-cleanrooms (>=1.34.0,<1.35.0)", "mypy-boto3-cleanroomsml (>=1.34.0,<1.35.0)", "mypy-boto3-cloud9 (>=1.34.0,<1.35.0)", "mypy-boto3-cloudcontrol (>=1.34.0,<1.35.0)", "mypy-boto3-clouddirectory (>=1.34.0,<1.35.0)", "mypy-boto3-cloudformation (>=1.34.0,<1.35.0)", "mypy-boto3-cloudfront (>=1.34.0,<1.35.0)", 
"mypy-boto3-cloudfront-keyvaluestore (>=1.34.0,<1.35.0)", "mypy-boto3-cloudhsm (>=1.34.0,<1.35.0)", "mypy-boto3-cloudhsmv2 (>=1.34.0,<1.35.0)", "mypy-boto3-cloudsearch (>=1.34.0,<1.35.0)", "mypy-boto3-cloudsearchdomain (>=1.34.0,<1.35.0)", "mypy-boto3-cloudtrail (>=1.34.0,<1.35.0)", "mypy-boto3-cloudtrail-data (>=1.34.0,<1.35.0)", "mypy-boto3-cloudwatch (>=1.34.0,<1.35.0)", "mypy-boto3-codeartifact (>=1.34.0,<1.35.0)", "mypy-boto3-codebuild (>=1.34.0,<1.35.0)", "mypy-boto3-codecatalyst (>=1.34.0,<1.35.0)", "mypy-boto3-codecommit (>=1.34.0,<1.35.0)", "mypy-boto3-codeconnections (>=1.34.0,<1.35.0)", "mypy-boto3-codedeploy (>=1.34.0,<1.35.0)", "mypy-boto3-codeguru-reviewer (>=1.34.0,<1.35.0)", "mypy-boto3-codeguru-security (>=1.34.0,<1.35.0)", "mypy-boto3-codeguruprofiler (>=1.34.0,<1.35.0)", "mypy-boto3-codepipeline (>=1.34.0,<1.35.0)", "mypy-boto3-codestar (>=1.34.0,<1.35.0)", "mypy-boto3-codestar-connections (>=1.34.0,<1.35.0)", "mypy-boto3-codestar-notifications (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-identity (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-idp (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-sync (>=1.34.0,<1.35.0)", "mypy-boto3-comprehend (>=1.34.0,<1.35.0)", "mypy-boto3-comprehendmedical (>=1.34.0,<1.35.0)", "mypy-boto3-compute-optimizer (>=1.34.0,<1.35.0)", "mypy-boto3-config (>=1.34.0,<1.35.0)", "mypy-boto3-connect (>=1.34.0,<1.35.0)", "mypy-boto3-connect-contact-lens (>=1.34.0,<1.35.0)", "mypy-boto3-connectcampaigns (>=1.34.0,<1.35.0)", "mypy-boto3-connectcases (>=1.34.0,<1.35.0)", "mypy-boto3-connectparticipant (>=1.34.0,<1.35.0)", "mypy-boto3-controlcatalog (>=1.34.0,<1.35.0)", "mypy-boto3-controltower (>=1.34.0,<1.35.0)", "mypy-boto3-cost-optimization-hub (>=1.34.0,<1.35.0)", "mypy-boto3-cur (>=1.34.0,<1.35.0)", "mypy-boto3-customer-profiles (>=1.34.0,<1.35.0)", "mypy-boto3-databrew (>=1.34.0,<1.35.0)", "mypy-boto3-dataexchange (>=1.34.0,<1.35.0)", "mypy-boto3-datapipeline (>=1.34.0,<1.35.0)", "mypy-boto3-datasync (>=1.34.0,<1.35.0)", "mypy-boto3-datazone (>=1.34.0,<1.35.0)", "mypy-boto3-dax (>=1.34.0,<1.35.0)", "mypy-boto3-deadline (>=1.34.0,<1.35.0)", "mypy-boto3-detective (>=1.34.0,<1.35.0)", "mypy-boto3-devicefarm (>=1.34.0,<1.35.0)", "mypy-boto3-devops-guru (>=1.34.0,<1.35.0)", "mypy-boto3-directconnect (>=1.34.0,<1.35.0)", "mypy-boto3-discovery (>=1.34.0,<1.35.0)", "mypy-boto3-dlm (>=1.34.0,<1.35.0)", "mypy-boto3-dms (>=1.34.0,<1.35.0)", "mypy-boto3-docdb (>=1.34.0,<1.35.0)", "mypy-boto3-docdb-elastic (>=1.34.0,<1.35.0)", "mypy-boto3-drs (>=1.34.0,<1.35.0)", "mypy-boto3-ds (>=1.34.0,<1.35.0)", "mypy-boto3-dynamodb (>=1.34.0,<1.35.0)", "mypy-boto3-dynamodbstreams (>=1.34.0,<1.35.0)", "mypy-boto3-ebs (>=1.34.0,<1.35.0)", "mypy-boto3-ec2 (>=1.34.0,<1.35.0)", "mypy-boto3-ec2-instance-connect (>=1.34.0,<1.35.0)", "mypy-boto3-ecr (>=1.34.0,<1.35.0)", "mypy-boto3-ecr-public (>=1.34.0,<1.35.0)", "mypy-boto3-ecs (>=1.34.0,<1.35.0)", "mypy-boto3-efs (>=1.34.0,<1.35.0)", "mypy-boto3-eks (>=1.34.0,<1.35.0)", "mypy-boto3-eks-auth (>=1.34.0,<1.35.0)", "mypy-boto3-elastic-inference (>=1.34.0,<1.35.0)", "mypy-boto3-elasticache (>=1.34.0,<1.35.0)", "mypy-boto3-elasticbeanstalk (>=1.34.0,<1.35.0)", "mypy-boto3-elastictranscoder (>=1.34.0,<1.35.0)", "mypy-boto3-elb (>=1.34.0,<1.35.0)", "mypy-boto3-elbv2 (>=1.34.0,<1.35.0)", "mypy-boto3-emr (>=1.34.0,<1.35.0)", "mypy-boto3-emr-containers (>=1.34.0,<1.35.0)", "mypy-boto3-emr-serverless (>=1.34.0,<1.35.0)", "mypy-boto3-entityresolution (>=1.34.0,<1.35.0)", "mypy-boto3-es (>=1.34.0,<1.35.0)", "mypy-boto3-events (>=1.34.0,<1.35.0)", 
"mypy-boto3-evidently (>=1.34.0,<1.35.0)", "mypy-boto3-finspace (>=1.34.0,<1.35.0)", "mypy-boto3-finspace-data (>=1.34.0,<1.35.0)", "mypy-boto3-firehose (>=1.34.0,<1.35.0)", "mypy-boto3-fis (>=1.34.0,<1.35.0)", "mypy-boto3-fms (>=1.34.0,<1.35.0)", "mypy-boto3-forecast (>=1.34.0,<1.35.0)", "mypy-boto3-forecastquery (>=1.34.0,<1.35.0)", "mypy-boto3-frauddetector (>=1.34.0,<1.35.0)", "mypy-boto3-freetier (>=1.34.0,<1.35.0)", "mypy-boto3-fsx (>=1.34.0,<1.35.0)", "mypy-boto3-gamelift (>=1.34.0,<1.35.0)", "mypy-boto3-glacier (>=1.34.0,<1.35.0)", "mypy-boto3-globalaccelerator (>=1.34.0,<1.35.0)", "mypy-boto3-glue (>=1.34.0,<1.35.0)", "mypy-boto3-grafana (>=1.34.0,<1.35.0)", "mypy-boto3-greengrass (>=1.34.0,<1.35.0)", "mypy-boto3-greengrassv2 (>=1.34.0,<1.35.0)", "mypy-boto3-groundstation (>=1.34.0,<1.35.0)", "mypy-boto3-guardduty (>=1.34.0,<1.35.0)", "mypy-boto3-health (>=1.34.0,<1.35.0)", "mypy-boto3-healthlake (>=1.34.0,<1.35.0)", "mypy-boto3-iam (>=1.34.0,<1.35.0)", "mypy-boto3-identitystore (>=1.34.0,<1.35.0)", "mypy-boto3-imagebuilder (>=1.34.0,<1.35.0)", "mypy-boto3-importexport (>=1.34.0,<1.35.0)", "mypy-boto3-inspector (>=1.34.0,<1.35.0)", "mypy-boto3-inspector-scan (>=1.34.0,<1.35.0)", "mypy-boto3-inspector2 (>=1.34.0,<1.35.0)", "mypy-boto3-internetmonitor (>=1.34.0,<1.35.0)", "mypy-boto3-iot (>=1.34.0,<1.35.0)", "mypy-boto3-iot-data (>=1.34.0,<1.35.0)", "mypy-boto3-iot-jobs-data (>=1.34.0,<1.35.0)", "mypy-boto3-iot1click-devices (>=1.34.0,<1.35.0)", "mypy-boto3-iot1click-projects (>=1.34.0,<1.35.0)", "mypy-boto3-iotanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-iotdeviceadvisor (>=1.34.0,<1.35.0)", "mypy-boto3-iotevents (>=1.34.0,<1.35.0)", "mypy-boto3-iotevents-data (>=1.34.0,<1.35.0)", "mypy-boto3-iotfleethub (>=1.34.0,<1.35.0)", "mypy-boto3-iotfleetwise (>=1.34.0,<1.35.0)", "mypy-boto3-iotsecuretunneling (>=1.34.0,<1.35.0)", "mypy-boto3-iotsitewise (>=1.34.0,<1.35.0)", "mypy-boto3-iotthingsgraph (>=1.34.0,<1.35.0)", "mypy-boto3-iottwinmaker (>=1.34.0,<1.35.0)", "mypy-boto3-iotwireless (>=1.34.0,<1.35.0)", "mypy-boto3-ivs (>=1.34.0,<1.35.0)", "mypy-boto3-ivs-realtime (>=1.34.0,<1.35.0)", "mypy-boto3-ivschat (>=1.34.0,<1.35.0)", "mypy-boto3-kafka (>=1.34.0,<1.35.0)", "mypy-boto3-kafkaconnect (>=1.34.0,<1.35.0)", "mypy-boto3-kendra (>=1.34.0,<1.35.0)", "mypy-boto3-kendra-ranking (>=1.34.0,<1.35.0)", "mypy-boto3-keyspaces (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-archived-media (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-media (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-signaling (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-webrtc-storage (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisanalyticsv2 (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisvideo (>=1.34.0,<1.35.0)", "mypy-boto3-kms (>=1.34.0,<1.35.0)", "mypy-boto3-lakeformation (>=1.34.0,<1.35.0)", "mypy-boto3-lambda (>=1.34.0,<1.35.0)", "mypy-boto3-launch-wizard (>=1.34.0,<1.35.0)", "mypy-boto3-lex-models (>=1.34.0,<1.35.0)", "mypy-boto3-lex-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-lexv2-models (>=1.34.0,<1.35.0)", "mypy-boto3-lexv2-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager-linux-subscriptions (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager-user-subscriptions (>=1.34.0,<1.35.0)", "mypy-boto3-lightsail (>=1.34.0,<1.35.0)", "mypy-boto3-location (>=1.34.0,<1.35.0)", "mypy-boto3-logs (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutequipment (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutmetrics 
(>=1.34.0,<1.35.0)", "mypy-boto3-lookoutvision (>=1.34.0,<1.35.0)", "mypy-boto3-m2 (>=1.34.0,<1.35.0)", "mypy-boto3-machinelearning (>=1.34.0,<1.35.0)", "mypy-boto3-macie2 (>=1.34.0,<1.35.0)", "mypy-boto3-mailmanager (>=1.34.0,<1.35.0)", "mypy-boto3-managedblockchain (>=1.34.0,<1.35.0)", "mypy-boto3-managedblockchain-query (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-agreement (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-catalog (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-deployment (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-entitlement (>=1.34.0,<1.35.0)", "mypy-boto3-marketplacecommerceanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-mediaconnect (>=1.34.0,<1.35.0)", "mypy-boto3-mediaconvert (>=1.34.0,<1.35.0)", "mypy-boto3-medialive (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackage (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackage-vod (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackagev2 (>=1.34.0,<1.35.0)", "mypy-boto3-mediastore (>=1.34.0,<1.35.0)", "mypy-boto3-mediastore-data (>=1.34.0,<1.35.0)", "mypy-boto3-mediatailor (>=1.34.0,<1.35.0)", "mypy-boto3-medical-imaging (>=1.34.0,<1.35.0)", "mypy-boto3-memorydb (>=1.34.0,<1.35.0)", "mypy-boto3-meteringmarketplace (>=1.34.0,<1.35.0)", "mypy-boto3-mgh (>=1.34.0,<1.35.0)", "mypy-boto3-mgn (>=1.34.0,<1.35.0)", "mypy-boto3-migration-hub-refactor-spaces (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhub-config (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhuborchestrator (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhubstrategy (>=1.34.0,<1.35.0)", "mypy-boto3-mq (>=1.34.0,<1.35.0)", "mypy-boto3-mturk (>=1.34.0,<1.35.0)", "mypy-boto3-mwaa (>=1.34.0,<1.35.0)", "mypy-boto3-neptune (>=1.34.0,<1.35.0)", "mypy-boto3-neptune-graph (>=1.34.0,<1.35.0)", "mypy-boto3-neptunedata (>=1.34.0,<1.35.0)", "mypy-boto3-network-firewall (>=1.34.0,<1.35.0)", "mypy-boto3-networkmanager (>=1.34.0,<1.35.0)", "mypy-boto3-networkmonitor (>=1.34.0,<1.35.0)", "mypy-boto3-nimble (>=1.34.0,<1.35.0)", "mypy-boto3-oam (>=1.34.0,<1.35.0)", "mypy-boto3-omics (>=1.34.0,<1.35.0)", "mypy-boto3-opensearch (>=1.34.0,<1.35.0)", "mypy-boto3-opensearchserverless (>=1.34.0,<1.35.0)", "mypy-boto3-opsworks (>=1.34.0,<1.35.0)", "mypy-boto3-opsworkscm (>=1.34.0,<1.35.0)", "mypy-boto3-organizations (>=1.34.0,<1.35.0)", "mypy-boto3-osis (>=1.34.0,<1.35.0)", "mypy-boto3-outposts (>=1.34.0,<1.35.0)", "mypy-boto3-panorama (>=1.34.0,<1.35.0)", "mypy-boto3-payment-cryptography (>=1.34.0,<1.35.0)", "mypy-boto3-payment-cryptography-data (>=1.34.0,<1.35.0)", "mypy-boto3-pca-connector-ad (>=1.34.0,<1.35.0)", "mypy-boto3-pca-connector-scep (>=1.34.0,<1.35.0)", "mypy-boto3-personalize (>=1.34.0,<1.35.0)", "mypy-boto3-personalize-events (>=1.34.0,<1.35.0)", "mypy-boto3-personalize-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-pi (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-email (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-sms-voice (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-sms-voice-v2 (>=1.34.0,<1.35.0)", "mypy-boto3-pipes (>=1.34.0,<1.35.0)", "mypy-boto3-polly (>=1.34.0,<1.35.0)", "mypy-boto3-pricing (>=1.34.0,<1.35.0)", "mypy-boto3-privatenetworks (>=1.34.0,<1.35.0)", "mypy-boto3-proton (>=1.34.0,<1.35.0)", "mypy-boto3-qapps (>=1.34.0,<1.35.0)", "mypy-boto3-qbusiness (>=1.34.0,<1.35.0)", "mypy-boto3-qconnect (>=1.34.0,<1.35.0)", "mypy-boto3-qldb (>=1.34.0,<1.35.0)", "mypy-boto3-qldb-session (>=1.34.0,<1.35.0)", "mypy-boto3-quicksight (>=1.34.0,<1.35.0)", "mypy-boto3-ram (>=1.34.0,<1.35.0)", "mypy-boto3-rbin (>=1.34.0,<1.35.0)", "mypy-boto3-rds (>=1.34.0,<1.35.0)", "mypy-boto3-rds-data 
(>=1.34.0,<1.35.0)", "mypy-boto3-redshift (>=1.34.0,<1.35.0)", "mypy-boto3-redshift-data (>=1.34.0,<1.35.0)", "mypy-boto3-redshift-serverless (>=1.34.0,<1.35.0)", "mypy-boto3-rekognition (>=1.34.0,<1.35.0)", "mypy-boto3-repostspace (>=1.34.0,<1.35.0)", "mypy-boto3-resiliencehub (>=1.34.0,<1.35.0)", "mypy-boto3-resource-explorer-2 (>=1.34.0,<1.35.0)", "mypy-boto3-resource-groups (>=1.34.0,<1.35.0)", "mypy-boto3-resourcegroupstaggingapi (>=1.34.0,<1.35.0)", "mypy-boto3-robomaker (>=1.34.0,<1.35.0)", "mypy-boto3-rolesanywhere (>=1.34.0,<1.35.0)", "mypy-boto3-route53 (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-cluster (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-control-config (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-readiness (>=1.34.0,<1.35.0)", "mypy-boto3-route53domains (>=1.34.0,<1.35.0)", "mypy-boto3-route53profiles (>=1.34.0,<1.35.0)", "mypy-boto3-route53resolver (>=1.34.0,<1.35.0)", "mypy-boto3-rum (>=1.34.0,<1.35.0)", "mypy-boto3-s3 (>=1.34.0,<1.35.0)", "mypy-boto3-s3control (>=1.34.0,<1.35.0)", "mypy-boto3-s3outposts (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-a2i-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-edge (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-featurestore-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-geospatial (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-metrics (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-savingsplans (>=1.34.0,<1.35.0)", "mypy-boto3-scheduler (>=1.34.0,<1.35.0)", "mypy-boto3-schemas (>=1.34.0,<1.35.0)", "mypy-boto3-sdb (>=1.34.0,<1.35.0)", "mypy-boto3-secretsmanager (>=1.34.0,<1.35.0)", "mypy-boto3-securityhub (>=1.34.0,<1.35.0)", "mypy-boto3-securitylake (>=1.34.0,<1.35.0)", "mypy-boto3-serverlessrepo (>=1.34.0,<1.35.0)", "mypy-boto3-service-quotas (>=1.34.0,<1.35.0)", "mypy-boto3-servicecatalog (>=1.34.0,<1.35.0)", "mypy-boto3-servicecatalog-appregistry (>=1.34.0,<1.35.0)", "mypy-boto3-servicediscovery (>=1.34.0,<1.35.0)", "mypy-boto3-ses (>=1.34.0,<1.35.0)", "mypy-boto3-sesv2 (>=1.34.0,<1.35.0)", "mypy-boto3-shield (>=1.34.0,<1.35.0)", "mypy-boto3-signer (>=1.34.0,<1.35.0)", "mypy-boto3-simspaceweaver (>=1.34.0,<1.35.0)", "mypy-boto3-sms (>=1.34.0,<1.35.0)", "mypy-boto3-sms-voice (>=1.34.0,<1.35.0)", "mypy-boto3-snow-device-management (>=1.34.0,<1.35.0)", "mypy-boto3-snowball (>=1.34.0,<1.35.0)", "mypy-boto3-sns (>=1.34.0,<1.35.0)", "mypy-boto3-sqs (>=1.34.0,<1.35.0)", "mypy-boto3-ssm (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-contacts (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-incidents (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-quicksetup (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-sap (>=1.34.0,<1.35.0)", "mypy-boto3-sso (>=1.34.0,<1.35.0)", "mypy-boto3-sso-admin (>=1.34.0,<1.35.0)", "mypy-boto3-sso-oidc (>=1.34.0,<1.35.0)", "mypy-boto3-stepfunctions (>=1.34.0,<1.35.0)", "mypy-boto3-storagegateway (>=1.34.0,<1.35.0)", "mypy-boto3-sts (>=1.34.0,<1.35.0)", "mypy-boto3-supplychain (>=1.34.0,<1.35.0)", "mypy-boto3-support (>=1.34.0,<1.35.0)", "mypy-boto3-support-app (>=1.34.0,<1.35.0)", "mypy-boto3-swf (>=1.34.0,<1.35.0)", "mypy-boto3-synthetics (>=1.34.0,<1.35.0)", "mypy-boto3-taxsettings (>=1.34.0,<1.35.0)", "mypy-boto3-textract (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-influxdb (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-query (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-write (>=1.34.0,<1.35.0)", "mypy-boto3-tnb (>=1.34.0,<1.35.0)", "mypy-boto3-transcribe (>=1.34.0,<1.35.0)", "mypy-boto3-transfer (>=1.34.0,<1.35.0)", "mypy-boto3-translate (>=1.34.0,<1.35.0)", 
"mypy-boto3-trustedadvisor (>=1.34.0,<1.35.0)", "mypy-boto3-verifiedpermissions (>=1.34.0,<1.35.0)", "mypy-boto3-voice-id (>=1.34.0,<1.35.0)", "mypy-boto3-vpc-lattice (>=1.34.0,<1.35.0)", "mypy-boto3-waf (>=1.34.0,<1.35.0)", "mypy-boto3-waf-regional (>=1.34.0,<1.35.0)", "mypy-boto3-wafv2 (>=1.34.0,<1.35.0)", "mypy-boto3-wellarchitected (>=1.34.0,<1.35.0)", "mypy-boto3-wisdom (>=1.34.0,<1.35.0)", "mypy-boto3-workdocs (>=1.34.0,<1.35.0)", "mypy-boto3-worklink (>=1.34.0,<1.35.0)", "mypy-boto3-workmail (>=1.34.0,<1.35.0)", "mypy-boto3-workmailmessageflow (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces-thin-client (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces-web (>=1.34.0,<1.35.0)", "mypy-boto3-xray (>=1.34.0,<1.35.0)"] amp = ["mypy-boto3-amp (>=1.34.0,<1.35.0)"] amplify = ["mypy-boto3-amplify (>=1.34.0,<1.35.0)"] amplifybackend = ["mypy-boto3-amplifybackend (>=1.34.0,<1.35.0)"] @@ -395,7 +415,7 @@ bedrock-agent = ["mypy-boto3-bedrock-agent (>=1.34.0,<1.35.0)"] bedrock-agent-runtime = ["mypy-boto3-bedrock-agent-runtime (>=1.34.0,<1.35.0)"] bedrock-runtime = ["mypy-boto3-bedrock-runtime (>=1.34.0,<1.35.0)"] billingconductor = ["mypy-boto3-billingconductor (>=1.34.0,<1.35.0)"] -boto3 = ["boto3 (==1.34.146)", "botocore (==1.34.146)"] +boto3 = ["boto3 (==1.34.161)", "botocore (==1.34.161)"] braket = ["mypy-boto3-braket (>=1.34.0,<1.35.0)"] budgets = ["mypy-boto3-budgets (>=1.34.0,<1.35.0)"] ce = ["mypy-boto3-ce (>=1.34.0,<1.35.0)"] @@ -703,6 +723,7 @@ sqs = ["mypy-boto3-sqs (>=1.34.0,<1.35.0)"] ssm = ["mypy-boto3-ssm (>=1.34.0,<1.35.0)"] ssm-contacts = ["mypy-boto3-ssm-contacts (>=1.34.0,<1.35.0)"] ssm-incidents = ["mypy-boto3-ssm-incidents (>=1.34.0,<1.35.0)"] +ssm-quicksetup = ["mypy-boto3-ssm-quicksetup (>=1.34.0,<1.35.0)"] ssm-sap = ["mypy-boto3-ssm-sap (>=1.34.0,<1.35.0)"] sso = ["mypy-boto3-sso (>=1.34.0,<1.35.0)"] sso-admin = ["mypy-boto3-sso-admin (>=1.34.0,<1.35.0)"] @@ -744,13 +765,13 @@ xray = ["mypy-boto3-xray (>=1.34.0,<1.35.0)"] [[package]] name = "botocore" -version = "1.34.146" +version = "1.34.161" description = "Low-level, data-driven core of boto 3." 
optional = false python-versions = ">=3.8" files = [ - {file = "botocore-1.34.146-py3-none-any.whl", hash = "sha256:3fd4782362bd29c192704ebf859c5c8c5189ad05719e391eefe23088434427ae"}, - {file = "botocore-1.34.146.tar.gz", hash = "sha256:849cb8e54e042443aeabcd7822b5f2b76cb5cfe33fe3a71f91c7c069748a869c"}, + {file = "botocore-1.34.161-py3-none-any.whl", hash = "sha256:6c606d2da6f62fde06880aff1190566af208875c29938b6b68741e607817975a"}, + {file = "botocore-1.34.161.tar.gz", hash = "sha256:16381bfb786142099abf170ce734b95a402a3a7f8e4016358712ac333c5568b2"}, ] [package.dependencies] @@ -762,17 +783,17 @@ urllib3 = [ ] [package.extras] -crt = ["awscrt (==0.20.11)"] +crt = ["awscrt (==0.21.2)"] [[package]] name = "botocore-stubs" -version = "1.34.146" +version = "1.34.161" description = "Type annotations and code completion for botocore" optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "botocore_stubs-1.34.146-py3-none-any.whl", hash = "sha256:f77385f2b6af64b04e00dffc0f684879f308c48ce05deefabbec46b290841fc1"}, - {file = "botocore_stubs-1.34.146.tar.gz", hash = "sha256:a7d990c165e3151180a8c1c754183c2f687520117c918bdd6057f26baad4e1d2"}, + {file = "botocore_stubs-1.34.161-py3-none-any.whl", hash = "sha256:fff186b749b60814e01abbeca447d7c2d38d363c726bc23ee2f52da2dbcda868"}, + {file = "botocore_stubs-1.34.161.tar.gz", hash = "sha256:59d9493c9724dff1a76004dc3ec1eca9290ccb46ddc057acf3ac44071d02d3cb"}, ] [package.dependencies] @@ -792,6 +813,69 @@ files = [ {file = "cachetools-5.4.0.tar.gz", hash = "sha256:b8adc2e7c07f105ced7bc56dbb6dfbe7c4a00acce20e2227b3f355be89bc6827"}, ] +[[package]] +name = "cassandra-driver" +version = "3.29.1" +description = "DataStax Driver for Apache Cassandra" +optional = true +python-versions = "*" +files = [ + {file = "cassandra-driver-3.29.1.tar.gz", hash = "sha256:38e9c2a2f2a9664bb03f1f852d5fccaeff2163942b5db35dffcf8bf32a51cfe5"}, + {file = "cassandra_driver-3.29.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a8f175c7616a63ca48cb8bd4acc443e2a3d889964d5157cead761f23cc8db7bd"}, + {file = "cassandra_driver-3.29.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7d66398952b9cd21c40edff56e22b6d3bce765edc94b207ddb5896e7bc9aa088"}, + {file = "cassandra_driver-3.29.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bbc6f575ef109ce5d4abfa2033bf36c394032abd83e32ab671159ce68e7e17b"}, + {file = "cassandra_driver-3.29.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78f241af75696adb3e470209e2fbb498804c99e2b197d24d74774eee6784f283"}, + {file = "cassandra_driver-3.29.1-cp310-cp310-win32.whl", hash = "sha256:54d9e651a742d6ca3d874ef8d06a40fa032d2dba97142da2d36f60c5675e39f8"}, + {file = "cassandra_driver-3.29.1-cp310-cp310-win_amd64.whl", hash = "sha256:630dc5423cd40eba0ee9db31065e2238098ff1a25a6b1bd36360f85738f26e4b"}, + {file = "cassandra_driver-3.29.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0b841d38c96bb878d31df393954863652d6d3a85f47bcc00fd1d70a5ea73023f"}, + {file = "cassandra_driver-3.29.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:19cc7375f673e215bd4cbbefae2de9f07830be7dabef55284a2d2ff8d8691efe"}, + {file = "cassandra_driver-3.29.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b74b355be3dcafe652fffda8f14f385ccc1a8dae9df28e6080cc660da39b45f"}, + {file = "cassandra_driver-3.29.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e6dac7eddd3f4581859f180383574068a3f113907811b4dad755a8ace4c3fbd"}, + {file = 
"cassandra_driver-3.29.1-cp311-cp311-win32.whl", hash = "sha256:293a79dba417112b56320ed0013d71fd7520f5fc4a5fd2ac8000c762c6dd5b07"}, + {file = "cassandra_driver-3.29.1-cp311-cp311-win_amd64.whl", hash = "sha256:7c2374fdf1099047a6c9c8329c79d71ad11e61d9cca7de92a0f49655da4bdd8a"}, + {file = "cassandra_driver-3.29.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4431a0c836f33a33c733c84997fbdb6398be005c4d18a8c8525c469fdc29393c"}, + {file = "cassandra_driver-3.29.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d23b08381b171a9e42ace483a82457edcddada9e8367e31677b97538cde2dc34"}, + {file = "cassandra_driver-3.29.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4beb29a0139e63a10a5b9a3c7b72c30a4e6e20c9f0574f9d22c0d4144fe3d348"}, + {file = "cassandra_driver-3.29.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b206423cc454a78f16b411e7cb641dddc26168ac2e18f2c13665f5f3c89868c"}, + {file = "cassandra_driver-3.29.1-cp312-cp312-win32.whl", hash = "sha256:ac898cca7303a3a2a3070513eee12ef0f1be1a0796935c5b8aa13dae8c0a7f7e"}, + {file = "cassandra_driver-3.29.1-cp312-cp312-win_amd64.whl", hash = "sha256:4ad0c9fb2229048ad6ff8c6ddbf1fdc78b111f2b061c66237c2257fcc4a31b14"}, + {file = "cassandra_driver-3.29.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4282c5deac462e4bb0f6fd0553a33d514dbd5ee99d0812594210080330ddd1a2"}, + {file = "cassandra_driver-3.29.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:41ca7eea069754002418d3bdfbd3dfd150ea12cb9db474ab1a01fa4679a05bcb"}, + {file = "cassandra_driver-3.29.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6639ccb268c4dc754bc45e03551711780d0e02cb298ab26cde1f42b7bcc74f8"}, + {file = "cassandra_driver-3.29.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a9d7d3b1be24a7f113b5404186ccccc977520401303a8fe78ba34134cad2482"}, + {file = "cassandra_driver-3.29.1-cp38-cp38-win32.whl", hash = "sha256:81c8fd556c6e1bb93577e69c1f10a3fadf7ddb93958d226ccbb72389396e9a92"}, + {file = "cassandra_driver-3.29.1-cp38-cp38-win_amd64.whl", hash = "sha256:cfe70ed0f27af949de2767ea9cef4092584e8748759374a55bf23c30746c7b23"}, + {file = "cassandra_driver-3.29.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a2c03c1d834ac1a0ae39f9af297a8cd38829003ce910b08b324fb3abe488ce2b"}, + {file = "cassandra_driver-3.29.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9a3e1e2b01f3b7a5cf75c97401bce830071d99c42464352087d7475e0161af93"}, + {file = "cassandra_driver-3.29.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90c42006665a4e490b0766b70f3d637f36a30accbef2da35d6d4081c0e0bafc3"}, + {file = "cassandra_driver-3.29.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c1aca41f45772f9759e8246030907d92bc35fbbdc91525a3cb9b49939b80ad7"}, + {file = "cassandra_driver-3.29.1-cp39-cp39-win32.whl", hash = "sha256:ce4a66245d4a0c8b07fdcb6398698c2c42eb71245fb49cff39435bb702ff7be6"}, + {file = "cassandra_driver-3.29.1-cp39-cp39-win_amd64.whl", hash = "sha256:4cae69ceb1b1d9383e988a1b790115253eacf7867ceb15ed2adb736e3ce981be"}, +] + +[package.dependencies] +geomet = ">=0.1,<0.3" + +[package.extras] +cle = ["cryptography (>=35.0)"] +graph = ["gremlinpython (==3.4.6)"] + +[[package]] +name = "cassio" +version = "0.1.8" +description = "A framework-agnostic Python library to seamlessly integrate Apache Cassandra(R) with ML/LLM/genAI workloads." 
+optional = true +python-versions = "<4.0,>=3.8" +files = [ + {file = "cassio-0.1.8-py3-none-any.whl", hash = "sha256:c09e7c884ba7227ff5277c86f3b0f31c523672ea407f56d093c7227e69c54d94"}, + {file = "cassio-0.1.8.tar.gz", hash = "sha256:4e09929506cb3dd6fad217e89846d0a1a59069afd24b82c72526ef6f2e9271af"}, +] + +[package.dependencies] +cassandra-driver = ">=3.28.0,<4.0.0" +numpy = ">=1.0" +requests = ">=2.31.0,<3.0.0" + [[package]] name = "certifi" version = "2024.7.4" @@ -805,63 +889,78 @@ files = [ [[package]] name = "cffi" -version = "1.16.0" +version = "1.17.0" description = "Foreign Function Interface for Python calling C code." optional = false python-versions = ">=3.8" files = [ - {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, - {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, - {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, - {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, - {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, - {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, - {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"}, - {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"}, 
- {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"}, - {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"}, - {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"}, - {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"}, - {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"}, - {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"}, - {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"}, - {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"}, - {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"}, - {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, - {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, - {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, - {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, - {file = 
"cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, - {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, - {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, - {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, - {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, - {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, + {file = "cffi-1.17.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f9338cc05451f1942d0d8203ec2c346c830f8e86469903d5126c1f0a13a2bcbb"}, + {file = "cffi-1.17.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0ce71725cacc9ebf839630772b07eeec220cbb5f03be1399e0457a1464f8e1a"}, + {file = "cffi-1.17.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c815270206f983309915a6844fe994b2fa47e5d05c4c4cef267c3b30e34dbe42"}, + {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6bdcd415ba87846fd317bee0774e412e8792832e7805938987e4ede1d13046d"}, + {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a98748ed1a1df4ee1d6f927e151ed6c1a09d5ec21684de879c7ea6aa96f58f2"}, + {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0a048d4f6630113e54bb4b77e315e1ba32a5a31512c31a273807d0027a7e69ab"}, + {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24aa705a5f5bd3a8bcfa4d123f03413de5d86e497435693b638cbffb7d5d8a1b"}, + {file = "cffi-1.17.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:856bf0924d24e7f93b8aee12a3a1095c34085600aa805693fb7f5d1962393206"}, + {file = "cffi-1.17.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:4304d4416ff032ed50ad6bb87416d802e67139e31c0bde4628f36a47a3164bfa"}, + {file = "cffi-1.17.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:331ad15c39c9fe9186ceaf87203a9ecf5ae0ba2538c9e898e3a6967e8ad3db6f"}, + {file = "cffi-1.17.0-cp310-cp310-win32.whl", hash = "sha256:669b29a9eca6146465cc574659058ed949748f0809a2582d1f1a324eb91054dc"}, + {file = "cffi-1.17.0-cp310-cp310-win_amd64.whl", hash = "sha256:48b389b1fd5144603d61d752afd7167dfd205973a43151ae5045b35793232aa2"}, + {file = 
"cffi-1.17.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c5d97162c196ce54af6700949ddf9409e9833ef1003b4741c2b39ef46f1d9720"}, + {file = "cffi-1.17.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5ba5c243f4004c750836f81606a9fcb7841f8874ad8f3bf204ff5e56332b72b9"}, + {file = "cffi-1.17.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bb9333f58fc3a2296fb1d54576138d4cf5d496a2cc118422bd77835e6ae0b9cb"}, + {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:435a22d00ec7d7ea533db494da8581b05977f9c37338c80bc86314bec2619424"}, + {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1df34588123fcc88c872f5acb6f74ae59e9d182a2707097f9e28275ec26a12d"}, + {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df8bb0010fdd0a743b7542589223a2816bdde4d94bb5ad67884348fa2c1c67e8"}, + {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8b5b9712783415695663bd463990e2f00c6750562e6ad1d28e072a611c5f2a6"}, + {file = "cffi-1.17.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ffef8fd58a36fb5f1196919638f73dd3ae0db1a878982b27a9a5a176ede4ba91"}, + {file = "cffi-1.17.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e67d26532bfd8b7f7c05d5a766d6f437b362c1bf203a3a5ce3593a645e870b8"}, + {file = "cffi-1.17.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:45f7cd36186db767d803b1473b3c659d57a23b5fa491ad83c6d40f2af58e4dbb"}, + {file = "cffi-1.17.0-cp311-cp311-win32.whl", hash = "sha256:a9015f5b8af1bb6837a3fcb0cdf3b874fe3385ff6274e8b7925d81ccaec3c5c9"}, + {file = "cffi-1.17.0-cp311-cp311-win_amd64.whl", hash = "sha256:b50aaac7d05c2c26dfd50c3321199f019ba76bb650e346a6ef3616306eed67b0"}, + {file = "cffi-1.17.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aec510255ce690d240f7cb23d7114f6b351c733a74c279a84def763660a2c3bc"}, + {file = "cffi-1.17.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2770bb0d5e3cc0e31e7318db06efcbcdb7b31bcb1a70086d3177692a02256f59"}, + {file = "cffi-1.17.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:db9a30ec064129d605d0f1aedc93e00894b9334ec74ba9c6bdd08147434b33eb"}, + {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a47eef975d2b8b721775a0fa286f50eab535b9d56c70a6e62842134cf7841195"}, + {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3e0992f23bbb0be00a921eae5363329253c3b86287db27092461c887b791e5e"}, + {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6107e445faf057c118d5050560695e46d272e5301feffda3c41849641222a828"}, + {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb862356ee9391dc5a0b3cbc00f416b48c1b9a52d252d898e5b7696a5f9fe150"}, + {file = "cffi-1.17.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c1c13185b90bbd3f8b5963cd8ce7ad4ff441924c31e23c975cb150e27c2bf67a"}, + {file = "cffi-1.17.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:17c6d6d3260c7f2d94f657e6872591fe8733872a86ed1345bda872cfc8c74885"}, + {file = "cffi-1.17.0-cp312-cp312-win32.whl", hash = "sha256:c3b8bd3133cd50f6b637bb4322822c94c5ce4bf0d724ed5ae70afce62187c492"}, + {file = "cffi-1.17.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:dca802c8db0720ce1c49cce1149ff7b06e91ba15fa84b1d59144fef1a1bc7ac2"}, + {file = "cffi-1.17.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6ce01337d23884b21c03869d2f68c5523d43174d4fc405490eb0091057943118"}, + {file = "cffi-1.17.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cab2eba3830bf4f6d91e2d6718e0e1c14a2f5ad1af68a89d24ace0c6b17cced7"}, + {file = "cffi-1.17.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14b9cbc8f7ac98a739558eb86fabc283d4d564dafed50216e7f7ee62d0d25377"}, + {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b00e7bcd71caa0282cbe3c90966f738e2db91e64092a877c3ff7f19a1628fdcb"}, + {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:41f4915e09218744d8bae14759f983e466ab69b178de38066f7579892ff2a555"}, + {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4760a68cab57bfaa628938e9c2971137e05ce48e762a9cb53b76c9b569f1204"}, + {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:011aff3524d578a9412c8b3cfaa50f2c0bd78e03eb7af7aa5e0df59b158efb2f"}, + {file = "cffi-1.17.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:a003ac9edc22d99ae1286b0875c460351f4e101f8c9d9d2576e78d7e048f64e0"}, + {file = "cffi-1.17.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ef9528915df81b8f4c7612b19b8628214c65c9b7f74db2e34a646a0a2a0da2d4"}, + {file = "cffi-1.17.0-cp313-cp313-win32.whl", hash = "sha256:70d2aa9fb00cf52034feac4b913181a6e10356019b18ef89bc7c12a283bf5f5a"}, + {file = "cffi-1.17.0-cp313-cp313-win_amd64.whl", hash = "sha256:b7b6ea9e36d32582cda3465f54c4b454f62f23cb083ebc7a94e2ca6ef011c3a7"}, + {file = "cffi-1.17.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:964823b2fc77b55355999ade496c54dde161c621cb1f6eac61dc30ed1b63cd4c"}, + {file = "cffi-1.17.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:516a405f174fd3b88829eabfe4bb296ac602d6a0f68e0d64d5ac9456194a5b7e"}, + {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dec6b307ce928e8e112a6bb9921a1cb00a0e14979bf28b98e084a4b8a742bd9b"}, + {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4094c7b464cf0a858e75cd14b03509e84789abf7b79f8537e6a72152109c76e"}, + {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2404f3de742f47cb62d023f0ba7c5a916c9c653d5b368cc966382ae4e57da401"}, + {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3aa9d43b02a0c681f0bfbc12d476d47b2b2b6a3f9287f11ee42989a268a1833c"}, + {file = "cffi-1.17.0-cp38-cp38-win32.whl", hash = "sha256:0bb15e7acf8ab35ca8b24b90af52c8b391690ef5c4aec3d31f38f0d37d2cc499"}, + {file = "cffi-1.17.0-cp38-cp38-win_amd64.whl", hash = "sha256:93a7350f6706b31f457c1457d3a3259ff9071a66f312ae64dc024f049055f72c"}, + {file = "cffi-1.17.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1a2ddbac59dc3716bc79f27906c010406155031a1c801410f1bafff17ea304d2"}, + {file = "cffi-1.17.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6327b572f5770293fc062a7ec04160e89741e8552bf1c358d1a23eba68166759"}, + {file = "cffi-1.17.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbc183e7bef690c9abe5ea67b7b60fdbca81aa8da43468287dae7b5c046107d4"}, + {file = 
"cffi-1.17.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bdc0f1f610d067c70aa3737ed06e2726fd9d6f7bfee4a351f4c40b6831f4e82"}, + {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6d872186c1617d143969defeadac5a904e6e374183e07977eedef9c07c8953bf"}, + {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d46ee4764b88b91f16661a8befc6bfb24806d885e27436fdc292ed7e6f6d058"}, + {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f76a90c345796c01d85e6332e81cab6d70de83b829cf1d9762d0a3da59c7932"}, + {file = "cffi-1.17.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0e60821d312f99d3e1569202518dddf10ae547e799d75aef3bca3a2d9e8ee693"}, + {file = "cffi-1.17.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:eb09b82377233b902d4c3fbeeb7ad731cdab579c6c6fda1f763cd779139e47c3"}, + {file = "cffi-1.17.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:24658baf6224d8f280e827f0a50c46ad819ec8ba380a42448e24459daf809cf4"}, + {file = "cffi-1.17.0-cp39-cp39-win32.whl", hash = "sha256:0fdacad9e0d9fc23e519efd5ea24a70348305e8d7d85ecbb1a5fa66dc834e7fb"}, + {file = "cffi-1.17.0-cp39-cp39-win_amd64.whl", hash = "sha256:7cbc78dc018596315d4e7841c8c3a7ae31cc4d638c9b627f87d52e8abaaf2d29"}, + {file = "cffi-1.17.0.tar.gz", hash = "sha256:f3157624b7558b914cb039fd1af735e5e8049a87c817cc215109ad1c8779df76"}, ] [package.dependencies] @@ -993,22 +1092,23 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} [[package]] name = "cohere" -version = "5.6.2" +version = "5.8.1" description = "" optional = true python-versions = "<4.0,>=3.8" files = [ - {file = "cohere-5.6.2-py3-none-any.whl", hash = "sha256:cfecf1343bcaa4091266c5a231fbcb3ccbd80cad05ea093ef80024a117aa3a2f"}, - {file = "cohere-5.6.2.tar.gz", hash = "sha256:6bb901afdfb02f62ad8ed2d82f12d8ea87a6869710f5f880cb89190c4e994805"}, + {file = "cohere-5.8.1-py3-none-any.whl", hash = "sha256:92362c651dfbfef8c5d34e95de394578d7197ed7875c6fcbf101e84b60db7fbd"}, + {file = "cohere-5.8.1.tar.gz", hash = "sha256:4c0c4468f15f9ad7fb7af15cc9f7305cd6df51243d69e203682be87e9efa5071"}, ] [package.dependencies] boto3 = ">=1.34.0,<2.0.0" fastavro = ">=1.9.4,<2.0.0" httpx = ">=0.21.2" -httpx-sse = ">=0.4.0,<0.5.0" +httpx-sse = "0.4.0" parameterized = ">=0.9.0,<0.10.0" pydantic = ">=1.9.2" +pydantic-core = ">=2.18.2,<3.0.0" requests = ">=2.0.0,<3.0.0" tokenizers = ">=0.15,<1" types-requests = ">=2.0.0,<3.0.0" @@ -1027,79 +1127,102 @@ files = [ [[package]] name = "courlan" -version = "1.2.0" +version = "1.3.0" description = "Clean, filter and sample URLs to optimize data collection – includes spam, content type and language filters." 
optional = true -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "courlan-1.2.0-py3-none-any.whl", hash = "sha256:df9d3735b611e717c52a813a49d17a8b4d3a9d8b87bbace9065171fc5d084397"}, - {file = "courlan-1.2.0.tar.gz", hash = "sha256:0cbc9cac83970c651b937a7823a5d92cbebb6b601454ea0fb6cb4d0ee5d1845d"}, + {file = "courlan-1.3.0-py3-none-any.whl", hash = "sha256:bb30982108ef987731b127f1ecf5dfd5b7e46c825630e3c9313c80b4a454954c"}, + {file = "courlan-1.3.0.tar.gz", hash = "sha256:3868f388122f2b09d154802043fe92dfd62c3ea7a700eaae8abc05198cf8bc25"}, ] [package.dependencies] -babel = ">=2.11.0" -tld = {version = ">=0.13", markers = "python_version >= \"3.7\""} -urllib3 = {version = ">=1.26,<3", markers = "python_version >= \"3.7\""} +babel = ">=2.15.0" +tld = ">=0.13" +urllib3 = ">=1.26,<3" + +[package.extras] +dev = ["black", "mypy", "pytest", "pytest-cov"] [[package]] name = "coverage" -version = "7.6.0" +version = "7.6.1" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dff044f661f59dace805eedb4a7404c573b6ff0cdba4a524141bc63d7be5c7fd"}, - {file = "coverage-7.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a8659fd33ee9e6ca03950cfdcdf271d645cf681609153f218826dd9805ab585c"}, - {file = "coverage-7.6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7792f0ab20df8071d669d929c75c97fecfa6bcab82c10ee4adb91c7a54055463"}, - {file = "coverage-7.6.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4b3cd1ca7cd73d229487fa5caca9e4bc1f0bca96526b922d61053ea751fe791"}, - {file = "coverage-7.6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7e128f85c0b419907d1f38e616c4f1e9f1d1b37a7949f44df9a73d5da5cd53c"}, - {file = "coverage-7.6.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a94925102c89247530ae1dab7dc02c690942566f22e189cbd53579b0693c0783"}, - {file = "coverage-7.6.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:dcd070b5b585b50e6617e8972f3fbbee786afca71b1936ac06257f7e178f00f6"}, - {file = "coverage-7.6.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d50a252b23b9b4dfeefc1f663c568a221092cbaded20a05a11665d0dbec9b8fb"}, - {file = "coverage-7.6.0-cp310-cp310-win32.whl", hash = "sha256:0e7b27d04131c46e6894f23a4ae186a6a2207209a05df5b6ad4caee6d54a222c"}, - {file = "coverage-7.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:54dece71673b3187c86226c3ca793c5f891f9fc3d8aa183f2e3653da18566169"}, - {file = "coverage-7.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7b525ab52ce18c57ae232ba6f7010297a87ced82a2383b1afd238849c1ff933"}, - {file = "coverage-7.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bea27c4269234e06f621f3fac3925f56ff34bc14521484b8f66a580aacc2e7d"}, - {file = "coverage-7.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed8d1d1821ba5fc88d4a4f45387b65de52382fa3ef1f0115a4f7a20cdfab0e94"}, - {file = "coverage-7.6.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01c322ef2bbe15057bc4bf132b525b7e3f7206f071799eb8aa6ad1940bcf5fb1"}, - {file = "coverage-7.6.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03cafe82c1b32b770a29fd6de923625ccac3185a54a5e66606da26d105f37dac"}, - {file = 
"coverage-7.6.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0d1b923fc4a40c5832be4f35a5dab0e5ff89cddf83bb4174499e02ea089daf57"}, - {file = "coverage-7.6.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4b03741e70fb811d1a9a1d75355cf391f274ed85847f4b78e35459899f57af4d"}, - {file = "coverage-7.6.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a73d18625f6a8a1cbb11eadc1d03929f9510f4131879288e3f7922097a429f63"}, - {file = "coverage-7.6.0-cp311-cp311-win32.whl", hash = "sha256:65fa405b837060db569a61ec368b74688f429b32fa47a8929a7a2f9b47183713"}, - {file = "coverage-7.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:6379688fb4cfa921ae349c76eb1a9ab26b65f32b03d46bb0eed841fd4cb6afb1"}, - {file = "coverage-7.6.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f7db0b6ae1f96ae41afe626095149ecd1b212b424626175a6633c2999eaad45b"}, - {file = "coverage-7.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bbdf9a72403110a3bdae77948b8011f644571311c2fb35ee15f0f10a8fc082e8"}, - {file = "coverage-7.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cc44bf0315268e253bf563f3560e6c004efe38f76db03a1558274a6e04bf5d5"}, - {file = "coverage-7.6.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da8549d17489cd52f85a9829d0e1d91059359b3c54a26f28bec2c5d369524807"}, - {file = "coverage-7.6.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0086cd4fc71b7d485ac93ca4239c8f75732c2ae3ba83f6be1c9be59d9e2c6382"}, - {file = "coverage-7.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1fad32ee9b27350687035cb5fdf9145bc9cf0a094a9577d43e909948ebcfa27b"}, - {file = "coverage-7.6.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:044a0985a4f25b335882b0966625270a8d9db3d3409ddc49a4eb00b0ef5e8cee"}, - {file = "coverage-7.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:76d5f82213aa78098b9b964ea89de4617e70e0d43e97900c2778a50856dac605"}, - {file = "coverage-7.6.0-cp312-cp312-win32.whl", hash = "sha256:3c59105f8d58ce500f348c5b56163a4113a440dad6daa2294b5052a10db866da"}, - {file = "coverage-7.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:ca5d79cfdae420a1d52bf177de4bc2289c321d6c961ae321503b2ca59c17ae67"}, - {file = "coverage-7.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d39bd10f0ae453554798b125d2f39884290c480f56e8a02ba7a6ed552005243b"}, - {file = "coverage-7.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:beb08e8508e53a568811016e59f3234d29c2583f6b6e28572f0954a6b4f7e03d"}, - {file = "coverage-7.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2e16f4cd2bc4d88ba30ca2d3bbf2f21f00f382cf4e1ce3b1ddc96c634bc48ca"}, - {file = "coverage-7.6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6616d1c9bf1e3faea78711ee42a8b972367d82ceae233ec0ac61cc7fec09fa6b"}, - {file = "coverage-7.6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad4567d6c334c46046d1c4c20024de2a1c3abc626817ae21ae3da600f5779b44"}, - {file = "coverage-7.6.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d17c6a415d68cfe1091d3296ba5749d3d8696e42c37fca5d4860c5bf7b729f03"}, - {file = "coverage-7.6.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9146579352d7b5f6412735d0f203bbd8d00113a680b66565e205bc605ef81bc6"}, - {file = "coverage-7.6.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = 
"sha256:cdab02a0a941af190df8782aafc591ef3ad08824f97850b015c8c6a8b3877b0b"}, - {file = "coverage-7.6.0-cp38-cp38-win32.whl", hash = "sha256:df423f351b162a702c053d5dddc0fc0ef9a9e27ea3f449781ace5f906b664428"}, - {file = "coverage-7.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:f2501d60d7497fd55e391f423f965bbe9e650e9ffc3c627d5f0ac516026000b8"}, - {file = "coverage-7.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7221f9ac9dad9492cecab6f676b3eaf9185141539d5c9689d13fd6b0d7de840c"}, - {file = "coverage-7.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ddaaa91bfc4477d2871442bbf30a125e8fe6b05da8a0015507bfbf4718228ab2"}, - {file = "coverage-7.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4cbe651f3904e28f3a55d6f371203049034b4ddbce65a54527a3f189ca3b390"}, - {file = "coverage-7.6.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:831b476d79408ab6ccfadaaf199906c833f02fdb32c9ab907b1d4aa0713cfa3b"}, - {file = "coverage-7.6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46c3d091059ad0b9c59d1034de74a7f36dcfa7f6d3bde782c49deb42438f2450"}, - {file = "coverage-7.6.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4d5fae0a22dc86259dee66f2cc6c1d3e490c4a1214d7daa2a93d07491c5c04b6"}, - {file = "coverage-7.6.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:07ed352205574aad067482e53dd606926afebcb5590653121063fbf4e2175166"}, - {file = "coverage-7.6.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:49c76cdfa13015c4560702574bad67f0e15ca5a2872c6a125f6327ead2b731dd"}, - {file = "coverage-7.6.0-cp39-cp39-win32.whl", hash = "sha256:482855914928c8175735a2a59c8dc5806cf7d8f032e4820d52e845d1f731dca2"}, - {file = "coverage-7.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:543ef9179bc55edfd895154a51792b01c017c87af0ebaae092720152e19e42ca"}, - {file = "coverage-7.6.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:6fe885135c8a479d3e37a7aae61cbd3a0fb2deccb4dda3c25f92a49189f766d6"}, - {file = "coverage-7.6.0.tar.gz", hash = "sha256:289cc803fa1dc901f84701ac10c9ee873619320f2f9aff38794db4a4a0268d51"}, + {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"}, + {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"}, + {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = 
"sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"}, + {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"}, + {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"}, + {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"}, + {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"}, + {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"}, + {file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"}, + {file = 
"coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"}, + {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"}, + {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"}, + {file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"}, + {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"}, + {file = "coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"}, + {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"}, + {file = 
"coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"}, + {file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"}, + {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"}, + {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"}, + {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"}, + {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"}, + {file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, ] [package.dependencies] @@ -1201,6 +1324,20 @@ wrapt = ">=1.10,<2" [package.extras] dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] +[[package]] +name = "deprecation" +version = "2.1.0" +description = "A library to handle automated deprecations" +optional = true +python-versions = "*" +files = [ + {file = "deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a"}, + {file = "deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff"}, +] + +[package.dependencies] +packaging = "*" + 
[[package]] name = "diffusers" version = "0.29.2" @@ -1308,37 +1445,38 @@ files = [ [[package]] name = "duckduckgo-search" -version = "6.2.1" +version = "6.2.7" description = "Search for words, documents, images, news, maps and text translation using the DuckDuckGo.com search engine." optional = true python-versions = ">=3.8" files = [ - {file = "duckduckgo_search-6.2.1-py3-none-any.whl", hash = "sha256:1a03f799b85fdfa08d5e6478624683f373b9dc35e6f145544b9cab72a4f575fa"}, - {file = "duckduckgo_search-6.2.1.tar.gz", hash = "sha256:d664ec096193e3fb43bdfae4b0ad9c04e44094b58f41998adcdd20a86ee1ed74"}, + {file = "duckduckgo_search-6.2.7-py3-none-any.whl", hash = "sha256:cf3027786aea64e21fbd822b804a17e5d894e989c9e8e724be37031910312932"}, + {file = "duckduckgo_search-6.2.7.tar.gz", hash = "sha256:dd8ed40daf5a6d1f992d0155e586a9355b97d9e0f4d909bae23e3d00266148c7"}, ] [package.dependencies] click = ">=8.1.7" -pyreqwest-impersonate = ">=0.5.0" +primp = ">=0.5.5" [package.extras] -dev = ["mypy (>=1.10.1)", "pytest (>=8.2.2)", "pytest-asyncio (>=0.23.7)", "ruff (>=0.5.2)"] +dev = ["mypy (>=1.11.0)", "pytest (>=8.3.1)", "pytest-asyncio (>=0.23.8)", "ruff (>=0.5.5)"] lxml = ["lxml (>=5.2.2)"] [[package]] name = "elevenlabs" -version = "1.5.0" +version = "1.7.0" description = "" optional = true python-versions = "<4.0,>=3.8" files = [ - {file = "elevenlabs-1.5.0-py3-none-any.whl", hash = "sha256:cc28257ad535adf57fdc70c9a3b8ddd6c34166f310bbc11a733523894e73ceca"}, - {file = "elevenlabs-1.5.0.tar.gz", hash = "sha256:d56a282f933e2eb991dac2b09cf7e928f1899f1cb5a9e9fa1d500be6edd7ee1d"}, + {file = "elevenlabs-1.7.0-py3-none-any.whl", hash = "sha256:fbdde75ba0a9d904427f7c14297dd6a2c4f64ae1d34ea3a08c022cdac5be6688"}, + {file = "elevenlabs-1.7.0.tar.gz", hash = "sha256:42ca044778d7f0bfd23da82c9a32d539877118e6ba2e4f89343b6f41bbc3516a"}, ] [package.dependencies] httpx = ">=0.21.2" pydantic = ">=1.9.2" +pydantic-core = ">=2.18.2,<3.0.0" requests = ">=2.20" typing_extensions = ">=4.0.0" websockets = ">=11.0" @@ -1593,6 +1731,21 @@ files = [ [package.extras] speedup = ["python-levenshtein (>=0.12)"] +[[package]] +name = "geomet" +version = "0.2.1.post1" +description = "GeoJSON <-> WKT/WKB conversion utilities" +optional = true +python-versions = ">2.6, !=3.3.*, <4" +files = [ + {file = "geomet-0.2.1.post1-py3-none-any.whl", hash = "sha256:a41a1e336b381416d6cbed7f1745c848e91defaa4d4c1bdc1312732e46ffad2b"}, + {file = "geomet-0.2.1.post1.tar.gz", hash = "sha256:91d754f7c298cbfcabd3befdb69c641c27fe75e808b27aa55028605761d17e95"}, +] + +[package.dependencies] +click = "*" +six = "*" + [[package]] name = "ghp-import" version = "2.1.0" @@ -1660,13 +1813,13 @@ grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] [[package]] name = "google-api-python-client" -version = "2.137.0" +version = "2.141.0" description = "Google API Client Library for Python" optional = true python-versions = ">=3.7" files = [ - {file = "google_api_python_client-2.137.0-py2.py3-none-any.whl", hash = "sha256:a8b5c5724885e5be9f5368739aa0ccf416627da4ebd914b410a090c18f84d692"}, - {file = "google_api_python_client-2.137.0.tar.gz", hash = "sha256:e739cb74aac8258b1886cb853b0722d47c81fe07ad649d7f2206f06530513c04"}, + {file = "google_api_python_client-2.141.0-py2.py3-none-any.whl", hash = "sha256:43c05322b91791204465291b3852718fae38d4f84b411d8be847c4f86882652a"}, + {file = "google_api_python_client-2.141.0.tar.gz", hash = "sha256:0f225b1f45d5a6f8c2a400f48729f5d6da9a81138e81e0478d61fdd8edf6563a"}, ] [package.dependencies] @@ -1678,13 +1831,13 @@ uritemplate = ">=3.0.1,<5" 
[[package]] name = "google-auth" -version = "2.32.0" +version = "2.33.0" description = "Google Authentication Library" optional = true python-versions = ">=3.7" files = [ - {file = "google_auth-2.32.0-py2.py3-none-any.whl", hash = "sha256:53326ea2ebec768070a94bee4e1b9194c9646ea0c2bd72422785bd0f9abfad7b"}, - {file = "google_auth-2.32.0.tar.gz", hash = "sha256:49315be72c55a6a37d62819e3573f6b416aca00721f7e3e31a008d928bf64022"}, + {file = "google_auth-2.33.0-py2.py3-none-any.whl", hash = "sha256:8eff47d0d4a34ab6265c50a106a3362de6a9975bb08998700e389f857e4d39df"}, + {file = "google_auth-2.33.0.tar.gz", hash = "sha256:d6a52342160d7290e334b4d47ba390767e4438ad0d45b7630774533e82655b95"}, ] [package.dependencies] @@ -1827,158 +1980,151 @@ test = ["objgraph", "psutil"] [[package]] name = "griffe" -version = "0.48.0" +version = "0.49.0" description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." optional = false python-versions = ">=3.8" files = [ - {file = "griffe-0.48.0-py3-none-any.whl", hash = "sha256:f944c6ff7bd31cf76f264adcd6ab8f3d00a2f972ae5cc8db2d7b6dcffeff65a2"}, - {file = "griffe-0.48.0.tar.gz", hash = "sha256:f099461c02f016b6be4af386d5aa92b01fb4efe6c1c2c360dda9a5d0a863bb7f"}, + {file = "griffe-0.49.0-py3-none-any.whl", hash = "sha256:c0d505f2a444ac342b22f4647d6444c8db64964b6a379c14f401fc467c0741a3"}, + {file = "griffe-0.49.0.tar.gz", hash = "sha256:a7e1235c27d8139e0fd24a5258deef6061bc876a9fda8117a5cf7b53ee940a91"}, ] [package.dependencies] -backports-strenum = {version = ">=1.3", markers = "python_version < \"3.11\""} colorama = ">=0.4" [[package]] name = "grpcio" -version = "1.65.1" +version = "1.65.4" description = "HTTP/2-based RPC framework" optional = true python-versions = ">=3.8" files = [ - {file = "grpcio-1.65.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:3dc5f928815b8972fb83b78d8db5039559f39e004ec93ebac316403fe031a062"}, - {file = "grpcio-1.65.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:8333ca46053c35484c9f2f7e8d8ec98c1383a8675a449163cea31a2076d93de8"}, - {file = "grpcio-1.65.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:7af64838b6e615fff0ec711960ed9b6ee83086edfa8c32670eafb736f169d719"}, - {file = "grpcio-1.65.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbb64b4166362d9326f7efbf75b1c72106c1aa87f13a8c8b56a1224fac152f5c"}, - {file = "grpcio-1.65.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8422dc13ad93ec8caa2612b5032a2b9cd6421c13ed87f54db4a3a2c93afaf77"}, - {file = "grpcio-1.65.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:4effc0562b6c65d4add6a873ca132e46ba5e5a46f07c93502c37a9ae7f043857"}, - {file = "grpcio-1.65.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a6c71575a2fedf259724981fd73a18906513d2f306169c46262a5bae956e6364"}, - {file = "grpcio-1.65.1-cp310-cp310-win32.whl", hash = "sha256:34966cf526ef0ea616e008d40d989463e3db157abb213b2f20c6ce0ae7928875"}, - {file = "grpcio-1.65.1-cp310-cp310-win_amd64.whl", hash = "sha256:ca931de5dd6d9eb94ff19a2c9434b23923bce6f767179fef04dfa991f282eaad"}, - {file = "grpcio-1.65.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:bbb46330cc643ecf10bd9bd4ca8e7419a14b6b9dedd05f671c90fb2c813c6037"}, - {file = "grpcio-1.65.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d827a6fb9215b961eb73459ad7977edb9e748b23e3407d21c845d1d8ef6597e5"}, - {file = "grpcio-1.65.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = 
"sha256:6e71aed8835f8d9fbcb84babc93a9da95955d1685021cceb7089f4f1e717d719"}, - {file = "grpcio-1.65.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a1c84560b3b2d34695c9ba53ab0264e2802721c530678a8f0a227951f453462"}, - {file = "grpcio-1.65.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27adee2338d697e71143ed147fe286c05810965d5d30ec14dd09c22479bfe48a"}, - {file = "grpcio-1.65.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f62652ddcadc75d0e7aa629e96bb61658f85a993e748333715b4ab667192e4e8"}, - {file = "grpcio-1.65.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:71a05fd814700dd9cb7d9a507f2f6a1ef85866733ccaf557eedacec32d65e4c2"}, - {file = "grpcio-1.65.1-cp311-cp311-win32.whl", hash = "sha256:b590f1ad056294dfaeac0b7e1b71d3d5ace638d8dd1f1147ce4bd13458783ba8"}, - {file = "grpcio-1.65.1-cp311-cp311-win_amd64.whl", hash = "sha256:12e9bdf3b5fd48e5fbe5b3da382ad8f97c08b47969f3cca81dd9b36b86ed39e2"}, - {file = "grpcio-1.65.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:54cb822e177374b318b233e54b6856c692c24cdbd5a3ba5335f18a47396bac8f"}, - {file = "grpcio-1.65.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:aaf3c54419a28d45bd1681372029f40e5bfb58e5265e3882eaf21e4a5f81a119"}, - {file = "grpcio-1.65.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:557de35bdfbe8bafea0a003dbd0f4da6d89223ac6c4c7549d78e20f92ead95d9"}, - {file = "grpcio-1.65.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8bfd95ef3b097f0cc86ade54eafefa1c8ed623aa01a26fbbdcd1a3650494dd11"}, - {file = "grpcio-1.65.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e6a8f3d6c41e6b642870afe6cafbaf7b61c57317f9ec66d0efdaf19db992b90"}, - {file = "grpcio-1.65.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:1faaf7355ceed07ceaef0b9dcefa4c98daf1dd8840ed75c2de128c3f4a4d859d"}, - {file = "grpcio-1.65.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:60f1f38eed830488ad2a1b11579ef0f345ff16fffdad1d24d9fbc97ba31804ff"}, - {file = "grpcio-1.65.1-cp312-cp312-win32.whl", hash = "sha256:e75acfa52daf5ea0712e8aa82f0003bba964de7ae22c26d208cbd7bc08500177"}, - {file = "grpcio-1.65.1-cp312-cp312-win_amd64.whl", hash = "sha256:ff5a84907e51924973aa05ed8759210d8cdae7ffcf9e44fd17646cf4a902df59"}, - {file = "grpcio-1.65.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:1fbd6331f18c3acd7e09d17fd840c096f56eaf0ef830fbd50af45ae9dc8dfd83"}, - {file = "grpcio-1.65.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:de5b6be29116e094c5ef9d9e4252e7eb143e3d5f6bd6d50a78075553ab4930b0"}, - {file = "grpcio-1.65.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:e4a3cdba62b2d6aeae6027ae65f350de6dc082b72e6215eccf82628e79efe9ba"}, - {file = "grpcio-1.65.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:941c4869aa229d88706b78187d60d66aca77fe5c32518b79e3c3e03fc26109a2"}, - {file = "grpcio-1.65.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f40cebe5edb518d78b8131e87cb83b3ee688984de38a232024b9b44e74ee53d3"}, - {file = "grpcio-1.65.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2ca684ba331fb249d8a1ce88db5394e70dbcd96e58d8c4b7e0d7b141a453dce9"}, - {file = "grpcio-1.65.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8558f0083ddaf5de64a59c790bffd7568e353914c0c551eae2955f54ee4b857f"}, - {file = "grpcio-1.65.1-cp38-cp38-win32.whl", hash = "sha256:8d8143a3e3966f85dce6c5cc45387ec36552174ba5712c5dc6fcc0898fb324c0"}, - {file = "grpcio-1.65.1-cp38-cp38-win_amd64.whl", hash = 
"sha256:76e81a86424d6ca1ce7c16b15bdd6a964a42b40544bf796a48da241fdaf61153"}, - {file = "grpcio-1.65.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:cb5175f45c980ff418998723ea1b3869cce3766d2ab4e4916fbd3cedbc9d0ed3"}, - {file = "grpcio-1.65.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b12c1aa7b95abe73b3e04e052c8b362655b41c7798da69f1eaf8d186c7d204df"}, - {file = "grpcio-1.65.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:3019fb50128b21a5e018d89569ffaaaa361680e1346c2f261bb84a91082eb3d3"}, - {file = "grpcio-1.65.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ae15275ed98ea267f64ee9ddedf8ecd5306a5b5bb87972a48bfe24af24153e8"}, - {file = "grpcio-1.65.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f096ffb881f37e8d4f958b63c74bfc400c7cebd7a944b027357cd2fb8d91a57"}, - {file = "grpcio-1.65.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2f56b5a68fdcf17a0a1d524bf177218c3c69b3947cb239ea222c6f1867c3ab68"}, - {file = "grpcio-1.65.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:941596d419b9736ab548aa0feb5bbba922f98872668847bf0720b42d1d227b9e"}, - {file = "grpcio-1.65.1-cp39-cp39-win32.whl", hash = "sha256:5fd7337a823b890215f07d429f4f193d24b80d62a5485cf88ee06648591a0c57"}, - {file = "grpcio-1.65.1-cp39-cp39-win_amd64.whl", hash = "sha256:1bceeec568372cbebf554eae1b436b06c2ff24cfaf04afade729fb9035408c6c"}, - {file = "grpcio-1.65.1.tar.gz", hash = "sha256:3c492301988cd720cd145d84e17318d45af342e29ef93141228f9cd73222368b"}, + {file = "grpcio-1.65.4-cp310-cp310-linux_armv7l.whl", hash = "sha256:0e85c8766cf7f004ab01aff6a0393935a30d84388fa3c58d77849fcf27f3e98c"}, + {file = "grpcio-1.65.4-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:e4a795c02405c7dfa8affd98c14d980f4acea16ea3b539e7404c645329460e5a"}, + {file = "grpcio-1.65.4-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:d7b984a8dd975d949c2042b9b5ebcf297d6d5af57dcd47f946849ee15d3c2fb8"}, + {file = "grpcio-1.65.4-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:644a783ce604a7d7c91412bd51cf9418b942cf71896344b6dc8d55713c71ce82"}, + {file = "grpcio-1.65.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5764237d751d3031a36fafd57eb7d36fd2c10c658d2b4057c516ccf114849a3e"}, + {file = "grpcio-1.65.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ee40d058cf20e1dd4cacec9c39e9bce13fedd38ce32f9ba00f639464fcb757de"}, + {file = "grpcio-1.65.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4482a44ce7cf577a1f8082e807a5b909236bce35b3e3897f839f2fbd9ae6982d"}, + {file = "grpcio-1.65.4-cp310-cp310-win32.whl", hash = "sha256:66bb051881c84aa82e4f22d8ebc9d1704b2e35d7867757f0740c6ef7b902f9b1"}, + {file = "grpcio-1.65.4-cp310-cp310-win_amd64.whl", hash = "sha256:870370524eff3144304da4d1bbe901d39bdd24f858ce849b7197e530c8c8f2ec"}, + {file = "grpcio-1.65.4-cp311-cp311-linux_armv7l.whl", hash = "sha256:85e9c69378af02e483bc626fc19a218451b24a402bdf44c7531e4c9253fb49ef"}, + {file = "grpcio-1.65.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2bd672e005afab8bf0d6aad5ad659e72a06dd713020554182a66d7c0c8f47e18"}, + {file = "grpcio-1.65.4-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:abccc5d73f5988e8f512eb29341ed9ced923b586bb72e785f265131c160231d8"}, + {file = "grpcio-1.65.4-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:886b45b29f3793b0c2576201947258782d7e54a218fe15d4a0468d9a6e00ce17"}, + {file = "grpcio-1.65.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:be952436571dacc93ccc7796db06b7daf37b3b56bb97e3420e6503dccfe2f1b4"}, + {file = "grpcio-1.65.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8dc9ddc4603ec43f6238a5c95400c9a901b6d079feb824e890623da7194ff11e"}, + {file = "grpcio-1.65.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ade1256c98cba5a333ef54636095f2c09e6882c35f76acb04412f3b1aa3c29a5"}, + {file = "grpcio-1.65.4-cp311-cp311-win32.whl", hash = "sha256:280e93356fba6058cbbfc6f91a18e958062ef1bdaf5b1caf46c615ba1ae71b5b"}, + {file = "grpcio-1.65.4-cp311-cp311-win_amd64.whl", hash = "sha256:d2b819f9ee27ed4e3e737a4f3920e337e00bc53f9e254377dd26fc7027c4d558"}, + {file = "grpcio-1.65.4-cp312-cp312-linux_armv7l.whl", hash = "sha256:926a0750a5e6fb002542e80f7fa6cab8b1a2ce5513a1c24641da33e088ca4c56"}, + {file = "grpcio-1.65.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:2a1d4c84d9e657f72bfbab8bedf31bdfc6bfc4a1efb10b8f2d28241efabfaaf2"}, + {file = "grpcio-1.65.4-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:17de4fda50967679677712eec0a5c13e8904b76ec90ac845d83386b65da0ae1e"}, + {file = "grpcio-1.65.4-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3dee50c1b69754a4228e933696408ea87f7e896e8d9797a3ed2aeed8dbd04b74"}, + {file = "grpcio-1.65.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74c34fc7562bdd169b77966068434a93040bfca990e235f7a67cdf26e1bd5c63"}, + {file = "grpcio-1.65.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:24a2246e80a059b9eb981e4c2a6d8111b1b5e03a44421adbf2736cc1d4988a8a"}, + {file = "grpcio-1.65.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:18c10f0d054d2dce34dd15855fcca7cc44ec3b811139437543226776730c0f28"}, + {file = "grpcio-1.65.4-cp312-cp312-win32.whl", hash = "sha256:d72962788b6c22ddbcdb70b10c11fbb37d60ae598c51eb47ec019db66ccfdff0"}, + {file = "grpcio-1.65.4-cp312-cp312-win_amd64.whl", hash = "sha256:7656376821fed8c89e68206a522522317787a3d9ed66fb5110b1dff736a5e416"}, + {file = "grpcio-1.65.4-cp38-cp38-linux_armv7l.whl", hash = "sha256:4934077b33aa6fe0b451de8b71dabde96bf2d9b4cb2b3187be86e5adebcba021"}, + {file = "grpcio-1.65.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0cef8c919a3359847c357cb4314e50ed1f0cca070f828ee8f878d362fd744d52"}, + {file = "grpcio-1.65.4-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:a925446e6aa12ca37114840d8550f308e29026cdc423a73da3043fd1603a6385"}, + {file = "grpcio-1.65.4-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf53e6247f1e2af93657e62e240e4f12e11ee0b9cef4ddcb37eab03d501ca864"}, + {file = "grpcio-1.65.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdb34278e4ceb224c89704cd23db0d902e5e3c1c9687ec9d7c5bb4c150f86816"}, + {file = "grpcio-1.65.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e6cbdd107e56bde55c565da5fd16f08e1b4e9b0674851d7749e7f32d8645f524"}, + {file = "grpcio-1.65.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:626319a156b1f19513156a3b0dbfe977f5f93db63ca673a0703238ebd40670d7"}, + {file = "grpcio-1.65.4-cp38-cp38-win32.whl", hash = "sha256:3d1bbf7e1dd1096378bd83c83f554d3b93819b91161deaf63e03b7022a85224a"}, + {file = "grpcio-1.65.4-cp38-cp38-win_amd64.whl", hash = "sha256:a99e6dffefd3027b438116f33ed1261c8d360f0dd4f943cb44541a2782eba72f"}, + {file = "grpcio-1.65.4-cp39-cp39-linux_armv7l.whl", hash = "sha256:874acd010e60a2ec1e30d5e505b0651ab12eb968157cd244f852b27c6dbed733"}, + {file = "grpcio-1.65.4-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:b07f36faf01fca5427d4aa23645e2d492157d56c91fab7e06fe5697d7e171ad4"}, + {file = "grpcio-1.65.4-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:b81711bf4ec08a3710b534e8054c7dcf90f2edc22bebe11c1775a23f145595fe"}, + {file = "grpcio-1.65.4-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88fcabc332a4aef8bcefadc34a02e9ab9407ab975d2c7d981a8e12c1aed92aa1"}, + {file = "grpcio-1.65.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9ba3e63108a8749994f02c7c0e156afb39ba5bdf755337de8e75eb685be244b"}, + {file = "grpcio-1.65.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8eb485801957a486bf5de15f2c792d9f9c897a86f2f18db8f3f6795a094b4bb2"}, + {file = "grpcio-1.65.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:075f3903bc1749ace93f2b0664f72964ee5f2da5c15d4b47e0ab68e4f442c257"}, + {file = "grpcio-1.65.4-cp39-cp39-win32.whl", hash = "sha256:0a0720299bdb2cc7306737295d56e41ce8827d5669d4a3cd870af832e3b17c4d"}, + {file = "grpcio-1.65.4-cp39-cp39-win_amd64.whl", hash = "sha256:a146bc40fa78769f22e1e9ff4f110ef36ad271b79707577bf2a31e3e931141b9"}, + {file = "grpcio-1.65.4.tar.gz", hash = "sha256:2a4f476209acffec056360d3e647ae0e14ae13dcf3dfb130c227ae1c594cbe39"}, ] [package.extras] -protobuf = ["grpcio-tools (>=1.65.1)"] +protobuf = ["grpcio-tools (>=1.65.4)"] [[package]] name = "grpcio-status" -version = "1.62.2" +version = "1.62.3" description = "Status proto mapping for gRPC" optional = true python-versions = ">=3.6" files = [ - {file = "grpcio-status-1.62.2.tar.gz", hash = "sha256:62e1bfcb02025a1cd73732a2d33672d3e9d0df4d21c12c51e0bbcaf09bab742a"}, - {file = "grpcio_status-1.62.2-py3-none-any.whl", hash = "sha256:206ddf0eb36bc99b033f03b2c8e95d319f0044defae9b41ae21408e7e0cda48f"}, + {file = "grpcio-status-1.62.3.tar.gz", hash = "sha256:289bdd7b2459794a12cf95dc0cb727bd4a1742c37bd823f760236c937e53a485"}, + {file = "grpcio_status-1.62.3-py3-none-any.whl", hash = "sha256:f9049b762ba8de6b1086789d8315846e094edac2c50beaf462338b301a8fd4b8"}, ] [package.dependencies] googleapis-common-protos = ">=1.5.5" -grpcio = ">=1.62.2" +grpcio = ">=1.62.3" protobuf = ">=4.21.6" [[package]] name = "grpcio-tools" -version = "1.62.2" +version = "1.62.3" description = "Protobuf code generator for gRPC" optional = true python-versions = ">=3.7" files = [ - {file = "grpcio-tools-1.62.2.tar.gz", hash = "sha256:5fd5e1582b678e6b941ee5f5809340be5e0724691df5299aae8226640f94e18f"}, - {file = "grpcio_tools-1.62.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:1679b4903aed2dc5bd8cb22a452225b05dc8470a076f14fd703581efc0740cdb"}, - {file = "grpcio_tools-1.62.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:9d41e0e47dd075c075bb8f103422968a65dd0d8dc8613288f573ae91eb1053ba"}, - {file = "grpcio_tools-1.62.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:987e774f74296842bbffd55ea8826370f70c499e5b5f71a8cf3103838b6ee9c3"}, - {file = "grpcio_tools-1.62.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40cd4eeea4b25bcb6903b82930d579027d034ba944393c4751cdefd9c49e6989"}, - {file = "grpcio_tools-1.62.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6746bc823958499a3cf8963cc1de00072962fb5e629f26d658882d3f4c35095"}, - {file = "grpcio_tools-1.62.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2ed775e844566ce9ce089be9a81a8b928623b8ee5820f5e4d58c1a9d33dfc5ae"}, - {file = "grpcio_tools-1.62.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bdc5dd3f57b5368d5d661d5d3703bcaa38bceca59d25955dff66244dbc987271"}, - {file = 
"grpcio_tools-1.62.2-cp310-cp310-win32.whl", hash = "sha256:3a8d6f07e64c0c7756f4e0c4781d9d5a2b9cc9cbd28f7032a6fb8d4f847d0445"}, - {file = "grpcio_tools-1.62.2-cp310-cp310-win_amd64.whl", hash = "sha256:e33b59fb3efdddeb97ded988a871710033e8638534c826567738d3edce528752"}, - {file = "grpcio_tools-1.62.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:472505d030135d73afe4143b0873efe0dcb385bd6d847553b4f3afe07679af00"}, - {file = "grpcio_tools-1.62.2-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:ec674b4440ef4311ac1245a709e87b36aca493ddc6850eebe0b278d1f2b6e7d1"}, - {file = "grpcio_tools-1.62.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:184b4174d4bd82089d706e8223e46c42390a6ebac191073b9772abc77308f9fa"}, - {file = "grpcio_tools-1.62.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c195d74fe98541178ece7a50dad2197d43991e0f77372b9a88da438be2486f12"}, - {file = "grpcio_tools-1.62.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a34d97c62e61bfe9e6cff0410fe144ac8cca2fc979ad0be46b7edf026339d161"}, - {file = "grpcio_tools-1.62.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cbb8453ae83a1db2452b7fe0f4b78e4a8dd32be0f2b2b73591ae620d4d784d3d"}, - {file = "grpcio_tools-1.62.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4f989e5cebead3ae92c6abf6bf7b19949e1563a776aea896ac5933f143f0c45d"}, - {file = "grpcio_tools-1.62.2-cp311-cp311-win32.whl", hash = "sha256:c48fabe40b9170f4e3d7dd2c252e4f1ff395dc24e49ac15fc724b1b6f11724da"}, - {file = "grpcio_tools-1.62.2-cp311-cp311-win_amd64.whl", hash = "sha256:8c616d0ad872e3780693fce6a3ac8ef00fc0963e6d7815ce9dcfae68ba0fc287"}, - {file = "grpcio_tools-1.62.2-cp312-cp312-linux_armv7l.whl", hash = "sha256:10cc3321704ecd17c93cf68c99c35467a8a97ffaaed53207e9b2da6ae0308ee1"}, - {file = "grpcio_tools-1.62.2-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:9be84ff6d47fd61462be7523b49d7ba01adf67ce4e1447eae37721ab32464dd8"}, - {file = "grpcio_tools-1.62.2-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:d82f681c9a9d933a9d8068e8e382977768e7779ddb8870fa0cf918d8250d1532"}, - {file = "grpcio_tools-1.62.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:04c607029ae3660fb1624ed273811ffe09d57d84287d37e63b5b802a35897329"}, - {file = "grpcio_tools-1.62.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72b61332f1b439c14cbd3815174a8f1d35067a02047c32decd406b3a09bb9890"}, - {file = "grpcio_tools-1.62.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8214820990d01b52845f9fbcb92d2b7384a0c321b303e3ac614c219dc7d1d3af"}, - {file = "grpcio_tools-1.62.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:462e0ab8dd7c7b70bfd6e3195eebc177549ede5cf3189814850c76f9a340d7ce"}, - {file = "grpcio_tools-1.62.2-cp312-cp312-win32.whl", hash = "sha256:fa107460c842e4c1a6266150881694fefd4f33baa544ea9489601810c2210ef8"}, - {file = "grpcio_tools-1.62.2-cp312-cp312-win_amd64.whl", hash = "sha256:759c60f24c33a181bbbc1232a6752f9b49fbb1583312a4917e2b389fea0fb0f2"}, - {file = "grpcio_tools-1.62.2-cp37-cp37m-linux_armv7l.whl", hash = "sha256:45db5da2bcfa88f2b86b57ef35daaae85c60bd6754a051d35d9449c959925b57"}, - {file = "grpcio_tools-1.62.2-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:ab84bae88597133f6ea7a2bdc57b2fda98a266fe8d8d4763652cbefd20e73ad7"}, - {file = "grpcio_tools-1.62.2-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:7a49bccae1c7d154b78e991885c3111c9ad8c8fa98e91233de425718f47c6139"}, - {file = 
"grpcio_tools-1.62.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7e439476b29d6dac363b321781a113794397afceeb97dad85349db5f1cb5e9a"}, - {file = "grpcio_tools-1.62.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ea369c4d1567d1acdf69c8ea74144f4ccad9e545df7f9a4fc64c94fa7684ba3"}, - {file = "grpcio_tools-1.62.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4f955702dc4b530696375251319d05223b729ed24e8673c2129f7a75d2caefbb"}, - {file = "grpcio_tools-1.62.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3708a747aa4b6b505727282ca887041174e146ae030ebcadaf4c1d346858df62"}, - {file = "grpcio_tools-1.62.2-cp37-cp37m-win_amd64.whl", hash = "sha256:2ce149ea55eadb486a7fb75a20f63ef3ac065ee6a0240ed25f3549ce7954c653"}, - {file = "grpcio_tools-1.62.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:58cbb24b3fa6ae35aa9c210fcea3a51aa5fef0cd25618eb4fd94f746d5a9b703"}, - {file = "grpcio_tools-1.62.2-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:6413581e14a80e0b4532577766cf0586de4dd33766a31b3eb5374a746771c07d"}, - {file = "grpcio_tools-1.62.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:47117c8a7e861382470d0e22d336e5a91fdc5f851d1db44fa784b9acea190d87"}, - {file = "grpcio_tools-1.62.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9f1ba79a253df9e553d20319c615fa2b429684580fa042dba618d7f6649ac7e4"}, - {file = "grpcio_tools-1.62.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04a394cf5e51ba9be412eb9f6c482b6270bd81016e033e8eb7d21b8cc28fe8b5"}, - {file = "grpcio_tools-1.62.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3c53b221378b035ae2f1881cbc3aca42a6075a8e90e1a342c2f205eb1d1aa6a1"}, - {file = "grpcio_tools-1.62.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c384c838b34d1b67068e51b5bbe49caa6aa3633acd158f1ab16b5da8d226bc53"}, - {file = "grpcio_tools-1.62.2-cp38-cp38-win32.whl", hash = "sha256:19ea69e41c3565932aa28a202d1875ec56786aea46a2eab54a3b28e8a27f9517"}, - {file = "grpcio_tools-1.62.2-cp38-cp38-win_amd64.whl", hash = "sha256:1d768a5c07279a4c461ebf52d0cec1c6ca85c6291c71ec2703fe3c3e7e28e8c4"}, - {file = "grpcio_tools-1.62.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:5b07b5874187e170edfbd7aa2ca3a54ebf3b2952487653e8c0b0d83601c33035"}, - {file = "grpcio_tools-1.62.2-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:d58389fe8be206ddfb4fa703db1e24c956856fcb9a81da62b13577b3a8f7fda7"}, - {file = "grpcio_tools-1.62.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:7d8b4e00c3d7237b92260fc18a561cd81f1da82e8be100db1b7d816250defc66"}, - {file = "grpcio_tools-1.62.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fe08d2038f2b7c53259b5c49e0ad08c8e0ce2b548d8185993e7ef67e8592cca"}, - {file = "grpcio_tools-1.62.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19216e1fb26dbe23d12a810517e1b3fbb8d4f98b1a3fbebeec9d93a79f092de4"}, - {file = "grpcio_tools-1.62.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b8574469ecc4ff41d6bb95f44e0297cdb0d95bade388552a9a444db9cd7485cd"}, - {file = "grpcio_tools-1.62.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4f6f32d39283ea834a493fccf0ebe9cfddee7577bdcc27736ad4be1732a36399"}, - {file = "grpcio_tools-1.62.2-cp39-cp39-win32.whl", hash = "sha256:76eb459bdf3fb666e01883270beee18f3f11ed44488486b61cd210b4e0e17cc1"}, - {file = "grpcio_tools-1.62.2-cp39-cp39-win_amd64.whl", hash = "sha256:217c2ee6a7ce519a55958b8622e21804f6fdb774db08c322f4c9536c35fdce7c"}, -] - -[package.dependencies] -grpcio = 
">=1.62.2" + {file = "grpcio-tools-1.62.3.tar.gz", hash = "sha256:7c7136015c3d62c3eef493efabaf9e3380e3e66d24ee8e94c01cb71377f57833"}, + {file = "grpcio_tools-1.62.3-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:2f968b049c2849540751ec2100ab05e8086c24bead769ca734fdab58698408c1"}, + {file = "grpcio_tools-1.62.3-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:0a8c0c4724ae9c2181b7dbc9b186df46e4f62cb18dc184e46d06c0ebeccf569e"}, + {file = "grpcio_tools-1.62.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5782883a27d3fae8c425b29a9d3dcf5f47d992848a1b76970da3b5a28d424b26"}, + {file = "grpcio_tools-1.62.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3d812daffd0c2d2794756bd45a353f89e55dc8f91eb2fc840c51b9f6be62667"}, + {file = "grpcio_tools-1.62.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b47d0dda1bdb0a0ba7a9a6de88e5a1ed61f07fad613964879954961e36d49193"}, + {file = "grpcio_tools-1.62.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ca246dffeca0498be9b4e1ee169b62e64694b0f92e6d0be2573e65522f39eea9"}, + {file = "grpcio_tools-1.62.3-cp310-cp310-win32.whl", hash = "sha256:6a56d344b0bab30bf342a67e33d386b0b3c4e65868ffe93c341c51e1a8853ca5"}, + {file = "grpcio_tools-1.62.3-cp310-cp310-win_amd64.whl", hash = "sha256:710fecf6a171dcbfa263a0a3e7070e0df65ba73158d4c539cec50978f11dad5d"}, + {file = "grpcio_tools-1.62.3-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:703f46e0012af83a36082b5f30341113474ed0d91e36640da713355cd0ea5d23"}, + {file = "grpcio_tools-1.62.3-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:7cc83023acd8bc72cf74c2edbe85b52098501d5b74d8377bfa06f3e929803492"}, + {file = "grpcio_tools-1.62.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ff7d58a45b75df67d25f8f144936a3e44aabd91afec833ee06826bd02b7fbe7"}, + {file = "grpcio_tools-1.62.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f2483ea232bd72d98a6dc6d7aefd97e5bc80b15cd909b9e356d6f3e326b6e43"}, + {file = "grpcio_tools-1.62.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:962c84b4da0f3b14b3cdb10bc3837ebc5f136b67d919aea8d7bb3fd3df39528a"}, + {file = "grpcio_tools-1.62.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8ad0473af5544f89fc5a1ece8676dd03bdf160fb3230f967e05d0f4bf89620e3"}, + {file = "grpcio_tools-1.62.3-cp311-cp311-win32.whl", hash = "sha256:db3bc9fa39afc5e4e2767da4459df82b095ef0cab2f257707be06c44a1c2c3e5"}, + {file = "grpcio_tools-1.62.3-cp311-cp311-win_amd64.whl", hash = "sha256:e0898d412a434e768a0c7e365acabe13ff1558b767e400936e26b5b6ed1ee51f"}, + {file = "grpcio_tools-1.62.3-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:d102b9b21c4e1e40af9a2ab3c6d41afba6bd29c0aa50ca013bf85c99cdc44ac5"}, + {file = "grpcio_tools-1.62.3-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:0a52cc9444df978438b8d2332c0ca99000521895229934a59f94f37ed896b133"}, + {file = "grpcio_tools-1.62.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:141d028bf5762d4a97f981c501da873589df3f7e02f4c1260e1921e565b376fa"}, + {file = "grpcio_tools-1.62.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47a5c093ab256dec5714a7a345f8cc89315cb57c298b276fa244f37a0ba507f0"}, + {file = "grpcio_tools-1.62.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f6831fdec2b853c9daa3358535c55eed3694325889aa714070528cf8f92d7d6d"}, + {file = "grpcio_tools-1.62.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:e02d7c1a02e3814c94ba0cfe43d93e872c758bd8fd5c2797f894d0c49b4a1dfc"}, + {file = "grpcio_tools-1.62.3-cp312-cp312-win32.whl", hash = "sha256:b881fd9505a84457e9f7e99362eeedd86497b659030cf57c6f0070df6d9c2b9b"}, + {file = "grpcio_tools-1.62.3-cp312-cp312-win_amd64.whl", hash = "sha256:11c625eebefd1fd40a228fc8bae385e448c7e32a6ae134e43cf13bbc23f902b7"}, + {file = "grpcio_tools-1.62.3-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:ec6fbded0c61afe6f84e3c2a43e6d656791d95747d6d28b73eff1af64108c434"}, + {file = "grpcio_tools-1.62.3-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:bfda6ee8990997a9df95c5606f3096dae65f09af7ca03a1e9ca28f088caca5cf"}, + {file = "grpcio_tools-1.62.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b77f9f9cee87cd798f0fe26b7024344d1b03a7cd2d2cba7035f8433b13986325"}, + {file = "grpcio_tools-1.62.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e02d3b96f2d0e4bab9ceaa30f37d4f75571e40c6272e95364bff3125a64d184"}, + {file = "grpcio_tools-1.62.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1da38070738da53556a4b35ab67c1b9884a5dd48fa2f243db35dc14079ea3d0c"}, + {file = "grpcio_tools-1.62.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ace43b26d88a58dcff16c20d23ff72b04d0a415f64d2820f4ff06b1166f50557"}, + {file = "grpcio_tools-1.62.3-cp37-cp37m-win_amd64.whl", hash = "sha256:350a80485e302daaa95d335a931f97b693e170e02d43767ab06552c708808950"}, + {file = "grpcio_tools-1.62.3-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:c3a1ac9d394f8e229eb28eec2e04b9a6f5433fa19c9d32f1cb6066e3c5114a1d"}, + {file = "grpcio_tools-1.62.3-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:11f363570dea661dde99e04a51bd108a5807b5df32a6f8bdf4860e34e94a4dbf"}, + {file = "grpcio_tools-1.62.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9ad9950119d8ae27634e68b7663cc8d340ae535a0f80d85a55e56a6973ab1f"}, + {file = "grpcio_tools-1.62.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c5d22b252dcef11dd1e0fbbe5bbfb9b4ae048e8880d33338215e8ccbdb03edc"}, + {file = "grpcio_tools-1.62.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:27cd9ef5c5d68d5ed104b6dcb96fe9c66b82050e546c9e255716903c3d8f0373"}, + {file = "grpcio_tools-1.62.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f4b1615adf67bd8bb71f3464146a6f9949972d06d21a4f5e87e73f6464d97f57"}, + {file = "grpcio_tools-1.62.3-cp38-cp38-win32.whl", hash = "sha256:e18e15287c31baf574fcdf8251fb7f997d64e96c6ecf467906e576da0a079af6"}, + {file = "grpcio_tools-1.62.3-cp38-cp38-win_amd64.whl", hash = "sha256:6c3064610826f50bd69410c63101954676edc703e03f9e8f978a135f1aaf97c1"}, + {file = "grpcio_tools-1.62.3-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:8e62cc7164b0b7c5128e637e394eb2ef3db0e61fc798e80c301de3b2379203ed"}, + {file = "grpcio_tools-1.62.3-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:c8ad5cce554e2fcaf8842dee5d9462583b601a3a78f8b76a153c38c963f58c10"}, + {file = "grpcio_tools-1.62.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec279dcf3518201fc592c65002754f58a6b542798cd7f3ecd4af086422f33f29"}, + {file = "grpcio_tools-1.62.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c989246c2aebc13253f08be32538a4039a64e12d9c18f6d662d7aee641dc8b5"}, + {file = "grpcio_tools-1.62.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ca4f5eeadbb57cf03317d6a2857823239a63a59cc935f5bd6cf6e8b7af7a7ecc"}, + {file = "grpcio_tools-1.62.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:0cb3a3436ac119cbd37a7d3331d9bdf85dad21a6ac233a3411dff716dcbf401e"}, + {file = "grpcio_tools-1.62.3-cp39-cp39-win32.whl", hash = "sha256:3eae6ea76d62fcac091e1f15c2dcedf1dc3f114f8df1a972a8a0745e89f4cf61"}, + {file = "grpcio_tools-1.62.3-cp39-cp39-win_amd64.whl", hash = "sha256:eec73a005443061f4759b71a056f745e3b000dc0dc125c9f20560232dfbcbd14"}, +] + +[package.dependencies] +grpcio = ">=1.62.3" protobuf = ">=4.21.6,<5.0dev" setuptools = "*" @@ -2114,13 +2260,13 @@ files = [ [[package]] name = "huggingface-hub" -version = "0.24.0" +version = "0.24.5" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = true python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.24.0-py3-none-any.whl", hash = "sha256:7ad92edefb93d8145c061f6df8d99df2ff85f8379ba5fac8a95aca0642afa5d7"}, - {file = "huggingface_hub-0.24.0.tar.gz", hash = "sha256:6c7092736b577d89d57b3cdfea026f1b0dc2234ae783fa0d59caf1bf7d52dfa7"}, + {file = "huggingface_hub-0.24.5-py3-none-any.whl", hash = "sha256:d93fb63b1f1a919a22ce91a14518974e81fc4610bf344dfe7572343ce8d3aced"}, + {file = "huggingface_hub-0.24.5.tar.gz", hash = "sha256:7b45d6744dd53ce9cbf9880957de00e9d10a9ae837f1c9b7255fc8fa4e8264f3"}, ] [package.dependencies] @@ -2184,22 +2330,22 @@ files = [ [[package]] name = "importlib-metadata" -version = "7.1.0" +version = "8.0.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_metadata-7.1.0-py3-none-any.whl", hash = "sha256:30962b96c0c223483ed6cc7280e7f0199feb01a0e40cfae4d4450fc6fab1f570"}, - {file = "importlib_metadata-7.1.0.tar.gz", hash = "sha256:b78938b926ee8d5f020fc4772d487045805a55ddbad2ecf21c6d60938dc7fcd2"}, + {file = "importlib_metadata-8.0.0-py3-none-any.whl", hash = "sha256:15584cf2b1bf449d98ff8a6ff1abef57bf20f3ac6454f431736cd3e660921b2f"}, + {file = "importlib_metadata-8.0.0.tar.gz", hash = "sha256:188bd24e4c346d3f0a933f275c2fec67050326a856b9a359881d7c2a697e8812"}, ] [package.dependencies] zipp = ">=0.5" [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] -testing = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] +test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] [[package]] name = "iniconfig" @@ -2212,20 +2358,6 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] -[[package]] -name = "intel-openmp" -version = "2021.4.0" -description = "Intel OpenMP* Runtime Library" -optional = true -python-versions = "*" -files = [ - {file = "intel_openmp-2021.4.0-py2.py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.whl", hash = "sha256:41c01e266a7fdb631a7609191709322da2bbf24b252ba763f125dd651bcc7675"}, - {file = "intel_openmp-2021.4.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:3b921236a38384e2016f0f3d65af6732cf2c12918087128a9163225451e776f2"}, - {file = 
"intel_openmp-2021.4.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:e2240ab8d01472fed04f3544a878cda5da16c26232b7ea1b59132dbfb48b186e"}, - {file = "intel_openmp-2021.4.0-py2.py3-none-win32.whl", hash = "sha256:6e863d8fd3d7e8ef389d52cf97a50fe2afe1a19247e8c0d168ce021546f96fc9"}, - {file = "intel_openmp-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:eef4c8bcc8acefd7f5cd3b9384dbf73d59e2c99fc56545712ded913f43c4a94f"}, -] - [[package]] name = "jaraco-classes" version = "3.4.0" @@ -2264,21 +2396,21 @@ testing = ["portend", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytes [[package]] name = "jaraco-functools" -version = "4.0.1" +version = "4.0.2" description = "Functools like those found in stdlib" optional = false python-versions = ">=3.8" files = [ - {file = "jaraco.functools-4.0.1-py3-none-any.whl", hash = "sha256:3b24ccb921d6b593bdceb56ce14799204f473976e2a9d4b15b04d0f2c2326664"}, - {file = "jaraco_functools-4.0.1.tar.gz", hash = "sha256:d33fa765374c0611b52f8b3a795f8900869aa88c84769d4d1746cd68fb28c3e8"}, + {file = "jaraco.functools-4.0.2-py3-none-any.whl", hash = "sha256:c9d16a3ed4ccb5a889ad8e0b7a343401ee5b2a71cee6ed192d3f68bc351e94e3"}, + {file = "jaraco_functools-4.0.2.tar.gz", hash = "sha256:3460c74cd0d32bf82b9576bbb3527c4364d5b27a21f5158a62aed6c4b42e23f5"}, ] [package.dependencies] more-itertools = "*" [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["jaraco.classes", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +test = ["jaraco.classes", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [[package]] name = "jeepney" @@ -2316,7 +2448,7 @@ i18n = ["Babel (>=2.7)"] name = "jiter" version = "0.5.0" description = "Fast iterable JSON parser." -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "jiter-0.5.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b599f4e89b3def9a94091e6ee52e1d7ad7bc33e238ebb9c4c63f211d74822c3f"}, @@ -2426,13 +2558,13 @@ lxml = {version = ">=4.4.2", extras = ["html-clean"]} [[package]] name = "keyring" -version = "25.2.1" +version = "25.3.0" description = "Store and access your passwords safely." 
optional = false python-versions = ">=3.8" files = [ - {file = "keyring-25.2.1-py3-none-any.whl", hash = "sha256:2458681cdefc0dbc0b7eb6cf75d0b98e59f9ad9b2d4edd319d18f68bdca95e50"}, - {file = "keyring-25.2.1.tar.gz", hash = "sha256:daaffd42dbda25ddafb1ad5fec4024e5bbcfe424597ca1ca452b299861e49f1b"}, + {file = "keyring-25.3.0-py3-none-any.whl", hash = "sha256:8d963da00ccdf06e356acd9bf3b743208878751032d8599c6cc89eb51310ffae"}, + {file = "keyring-25.3.0.tar.gz", hash = "sha256:8d85a1ea5d6db8515b59e1c5d1d1678b03cf7fc8b8dcfb1651e8c4a524eb42ef"}, ] [package.dependencies] @@ -2446,158 +2578,154 @@ SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""} [package.extras] completion = ["shtab (>=1.1.0)"] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +test = ["pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [[package]] name = "lxml" -version = "5.2.2" +version = "5.3.0" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." optional = true python-versions = ">=3.6" files = [ - {file = "lxml-5.2.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:364d03207f3e603922d0d3932ef363d55bbf48e3647395765f9bfcbdf6d23632"}, - {file = "lxml-5.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:50127c186f191b8917ea2fb8b206fbebe87fd414a6084d15568c27d0a21d60db"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74e4f025ef3db1c6da4460dd27c118d8cd136d0391da4e387a15e48e5c975147"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:981a06a3076997adf7c743dcd0d7a0415582661e2517c7d961493572e909aa1d"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aef5474d913d3b05e613906ba4090433c515e13ea49c837aca18bde190853dff"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e275ea572389e41e8b039ac076a46cb87ee6b8542df3fff26f5baab43713bca"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5b65529bb2f21ac7861a0e94fdbf5dc0daab41497d18223b46ee8515e5ad297"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:bcc98f911f10278d1daf14b87d65325851a1d29153caaf146877ec37031d5f36"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:b47633251727c8fe279f34025844b3b3a3e40cd1b198356d003aa146258d13a2"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:fbc9d316552f9ef7bba39f4edfad4a734d3d6f93341232a9dddadec4f15d425f"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:13e69be35391ce72712184f69000cda04fc89689429179bc4c0ae5f0b7a8c21b"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3b6a30a9ab040b3f545b697cb3adbf3696c05a3a68aad172e3fd7ca73ab3c835"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a233bb68625a85126ac9f1fc66d24337d6e8a0f9207b688eec2e7c880f012ec0"}, - {file = 
"lxml-5.2.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:dfa7c241073d8f2b8e8dbc7803c434f57dbb83ae2a3d7892dd068d99e96efe2c"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1a7aca7964ac4bb07680d5c9d63b9d7028cace3e2d43175cb50bba8c5ad33316"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ae4073a60ab98529ab8a72ebf429f2a8cc612619a8c04e08bed27450d52103c0"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ffb2be176fed4457e445fe540617f0252a72a8bc56208fd65a690fdb1f57660b"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:e290d79a4107d7d794634ce3e985b9ae4f920380a813717adf61804904dc4393"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:96e85aa09274955bb6bd483eaf5b12abadade01010478154b0ec70284c1b1526"}, - {file = "lxml-5.2.2-cp310-cp310-win32.whl", hash = "sha256:f956196ef61369f1685d14dad80611488d8dc1ef00be57c0c5a03064005b0f30"}, - {file = "lxml-5.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:875a3f90d7eb5c5d77e529080d95140eacb3c6d13ad5b616ee8095447b1d22e7"}, - {file = "lxml-5.2.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:45f9494613160d0405682f9eee781c7e6d1bf45f819654eb249f8f46a2c22545"}, - {file = "lxml-5.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b0b3f2df149efb242cee2ffdeb6674b7f30d23c9a7af26595099afaf46ef4e88"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d28cb356f119a437cc58a13f8135ab8a4c8ece18159eb9194b0d269ec4e28083"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:657a972f46bbefdbba2d4f14413c0d079f9ae243bd68193cb5061b9732fa54c1"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b9ea10063efb77a965a8d5f4182806fbf59ed068b3c3fd6f30d2ac7bee734"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:07542787f86112d46d07d4f3c4e7c760282011b354d012dc4141cc12a68cef5f"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:303f540ad2dddd35b92415b74b900c749ec2010e703ab3bfd6660979d01fd4ed"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2eb2227ce1ff998faf0cd7fe85bbf086aa41dfc5af3b1d80867ecfe75fb68df3"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:1d8a701774dfc42a2f0b8ccdfe7dbc140500d1049e0632a611985d943fcf12df"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:56793b7a1a091a7c286b5f4aa1fe4ae5d1446fe742d00cdf2ffb1077865db10d"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eb00b549b13bd6d884c863554566095bf6fa9c3cecb2e7b399c4bc7904cb33b5"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a2569a1f15ae6c8c64108a2cd2b4a858fc1e13d25846be0666fc144715e32ab"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:8cf85a6e40ff1f37fe0f25719aadf443686b1ac7652593dc53c7ef9b8492b115"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:d237ba6664b8e60fd90b8549a149a74fcc675272e0e95539a00522e4ca688b04"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0b3f5016e00ae7630a4b83d0868fca1e3d494c78a75b1c7252606a3a1c5fc2ad"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:23441e2b5339bc54dc949e9e675fa35efe858108404ef9aa92f0456929ef6fe8"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2fb0ba3e8566548d6c8e7dd82a8229ff47bd8fb8c2da237607ac8e5a1b8312e5"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:79d1fb9252e7e2cfe4de6e9a6610c7cbb99b9708e2c3e29057f487de5a9eaefa"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6dcc3d17eac1df7859ae01202e9bb11ffa8c98949dcbeb1069c8b9a75917e01b"}, - {file = "lxml-5.2.2-cp311-cp311-win32.whl", hash = "sha256:4c30a2f83677876465f44c018830f608fa3c6a8a466eb223535035fbc16f3438"}, - {file = "lxml-5.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:49095a38eb333aaf44c06052fd2ec3b8f23e19747ca7ec6f6c954ffea6dbf7be"}, - {file = "lxml-5.2.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7429e7faa1a60cad26ae4227f4dd0459efde239e494c7312624ce228e04f6391"}, - {file = "lxml-5.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:50ccb5d355961c0f12f6cf24b7187dbabd5433f29e15147a67995474f27d1776"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc911208b18842a3a57266d8e51fc3cfaccee90a5351b92079beed912a7914c2"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33ce9e786753743159799fdf8e92a5da351158c4bfb6f2db0bf31e7892a1feb5"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec87c44f619380878bd49ca109669c9f221d9ae6883a5bcb3616785fa8f94c97"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08ea0f606808354eb8f2dfaac095963cb25d9d28e27edcc375d7b30ab01abbf6"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75a9632f1d4f698b2e6e2e1ada40e71f369b15d69baddb8968dcc8e683839b18"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74da9f97daec6928567b48c90ea2c82a106b2d500f397eeb8941e47d30b1ca85"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:0969e92af09c5687d769731e3f39ed62427cc72176cebb54b7a9d52cc4fa3b73"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:9164361769b6ca7769079f4d426a41df6164879f7f3568be9086e15baca61466"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d26a618ae1766279f2660aca0081b2220aca6bd1aa06b2cf73f07383faf48927"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab67ed772c584b7ef2379797bf14b82df9aa5f7438c5b9a09624dd834c1c1aaf"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:3d1e35572a56941b32c239774d7e9ad724074d37f90c7a7d499ab98761bd80cf"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:8268cbcd48c5375f46e000adb1390572c98879eb4f77910c6053d25cc3ac2c67"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e282aedd63c639c07c3857097fc0e236f984ceb4089a8b284da1c526491e3f3d"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfdc2bfe69e9adf0df4915949c22a25b39d175d599bf98e7ddf620a13678585"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4aefd911793b5d2d7a921233a54c90329bf3d4a6817dc465f12ffdfe4fc7b8fe"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8b8df03a9e995b6211dafa63b32f9d405881518ff1ddd775db4e7b98fb545e1c"}, - {file = 
"lxml-5.2.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f11ae142f3a322d44513de1018b50f474f8f736bc3cd91d969f464b5bfef8836"}, - {file = "lxml-5.2.2-cp312-cp312-win32.whl", hash = "sha256:16a8326e51fcdffc886294c1e70b11ddccec836516a343f9ed0f82aac043c24a"}, - {file = "lxml-5.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:bbc4b80af581e18568ff07f6395c02114d05f4865c2812a1f02f2eaecf0bfd48"}, - {file = "lxml-5.2.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e3d9d13603410b72787579769469af730c38f2f25505573a5888a94b62b920f8"}, - {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38b67afb0a06b8575948641c1d6d68e41b83a3abeae2ca9eed2ac59892b36706"}, - {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c689d0d5381f56de7bd6966a4541bff6e08bf8d3871bbd89a0c6ab18aa699573"}, - {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:cf2a978c795b54c539f47964ec05e35c05bd045db5ca1e8366988c7f2fe6b3ce"}, - {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:739e36ef7412b2bd940f75b278749106e6d025e40027c0b94a17ef7968d55d56"}, - {file = "lxml-5.2.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d8bbcd21769594dbba9c37d3c819e2d5847656ca99c747ddb31ac1701d0c0ed9"}, - {file = "lxml-5.2.2-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:2304d3c93f2258ccf2cf7a6ba8c761d76ef84948d87bf9664e14d203da2cd264"}, - {file = "lxml-5.2.2-cp36-cp36m-win32.whl", hash = "sha256:02437fb7308386867c8b7b0e5bc4cd4b04548b1c5d089ffb8e7b31009b961dc3"}, - {file = "lxml-5.2.2-cp36-cp36m-win_amd64.whl", hash = "sha256:edcfa83e03370032a489430215c1e7783128808fd3e2e0a3225deee278585196"}, - {file = "lxml-5.2.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:28bf95177400066596cdbcfc933312493799382879da504633d16cf60bba735b"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a745cc98d504d5bd2c19b10c79c61c7c3df9222629f1b6210c0368177589fb8"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b590b39ef90c6b22ec0be925b211298e810b4856909c8ca60d27ffbca6c12e6"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b336b0416828022bfd5a2e3083e7f5ba54b96242159f83c7e3eebaec752f1716"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:c2faf60c583af0d135e853c86ac2735ce178f0e338a3c7f9ae8f622fd2eb788c"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:4bc6cb140a7a0ad1f7bc37e018d0ed690b7b6520ade518285dc3171f7a117905"}, - {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7ff762670cada8e05b32bf1e4dc50b140790909caa8303cfddc4d702b71ea184"}, - {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:57f0a0bbc9868e10ebe874e9f129d2917750adf008fe7b9c1598c0fbbfdde6a6"}, - {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:a6d2092797b388342c1bc932077ad232f914351932353e2e8706851c870bca1f"}, - {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:60499fe961b21264e17a471ec296dcbf4365fbea611bf9e303ab69db7159ce61"}, - {file = "lxml-5.2.2-cp37-cp37m-win32.whl", hash = "sha256:d9b342c76003c6b9336a80efcc766748a333573abf9350f4094ee46b006ec18f"}, - {file = "lxml-5.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:b16db2770517b8799c79aa80f4053cd6f8b716f21f8aca962725a9565ce3ee40"}, - 
{file = "lxml-5.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7ed07b3062b055d7a7f9d6557a251cc655eed0b3152b76de619516621c56f5d3"}, - {file = "lxml-5.2.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f60fdd125d85bf9c279ffb8e94c78c51b3b6a37711464e1f5f31078b45002421"}, - {file = "lxml-5.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a7e24cb69ee5f32e003f50e016d5fde438010c1022c96738b04fc2423e61706"}, - {file = "lxml-5.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23cfafd56887eaed93d07bc4547abd5e09d837a002b791e9767765492a75883f"}, - {file = "lxml-5.2.2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:19b4e485cd07b7d83e3fe3b72132e7df70bfac22b14fe4bf7a23822c3a35bff5"}, - {file = "lxml-5.2.2-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7ce7ad8abebe737ad6143d9d3bf94b88b93365ea30a5b81f6877ec9c0dee0a48"}, - {file = "lxml-5.2.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e49b052b768bb74f58c7dda4e0bdf7b79d43a9204ca584ffe1fb48a6f3c84c66"}, - {file = "lxml-5.2.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d14a0d029a4e176795cef99c056d58067c06195e0c7e2dbb293bf95c08f772a3"}, - {file = "lxml-5.2.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:be49ad33819d7dcc28a309b86d4ed98e1a65f3075c6acd3cd4fe32103235222b"}, - {file = "lxml-5.2.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a6d17e0370d2516d5bb9062c7b4cb731cff921fc875644c3d751ad857ba9c5b1"}, - {file = "lxml-5.2.2-cp38-cp38-win32.whl", hash = "sha256:5b8c041b6265e08eac8a724b74b655404070b636a8dd6d7a13c3adc07882ef30"}, - {file = "lxml-5.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:f61efaf4bed1cc0860e567d2ecb2363974d414f7f1f124b1df368bbf183453a6"}, - {file = "lxml-5.2.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fb91819461b1b56d06fa4bcf86617fac795f6a99d12239fb0c68dbeba41a0a30"}, - {file = "lxml-5.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d4ed0c7cbecde7194cd3228c044e86bf73e30a23505af852857c09c24e77ec5d"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54401c77a63cc7d6dc4b4e173bb484f28a5607f3df71484709fe037c92d4f0ed"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:625e3ef310e7fa3a761d48ca7ea1f9d8718a32b1542e727d584d82f4453d5eeb"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:519895c99c815a1a24a926d5b60627ce5ea48e9f639a5cd328bda0515ea0f10c"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c7079d5eb1c1315a858bbf180000757db8ad904a89476653232db835c3114001"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:343ab62e9ca78094f2306aefed67dcfad61c4683f87eee48ff2fd74902447726"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:cd9e78285da6c9ba2d5c769628f43ef66d96ac3085e59b10ad4f3707980710d3"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:546cf886f6242dff9ec206331209db9c8e1643ae642dea5fdbecae2453cb50fd"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:02f6a8eb6512fdc2fd4ca10a49c341c4e109aa6e9448cc4859af5b949622715a"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:339ee4a4704bc724757cd5dd9dc8cf4d00980f5d3e6e06d5847c1b594ace68ab"}, - {file = 
"lxml-5.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0a028b61a2e357ace98b1615fc03f76eb517cc028993964fe08ad514b1e8892d"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f90e552ecbad426eab352e7b2933091f2be77115bb16f09f78404861c8322981"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:d83e2d94b69bf31ead2fa45f0acdef0757fa0458a129734f59f67f3d2eb7ef32"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a02d3c48f9bb1e10c7788d92c0c7db6f2002d024ab6e74d6f45ae33e3d0288a3"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6d68ce8e7b2075390e8ac1e1d3a99e8b6372c694bbe612632606d1d546794207"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:453d037e09a5176d92ec0fd282e934ed26d806331a8b70ab431a81e2fbabf56d"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:3b019d4ee84b683342af793b56bb35034bd749e4cbdd3d33f7d1107790f8c472"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb3942960f0beb9f46e2a71a3aca220d1ca32feb5a398656be934320804c0df9"}, - {file = "lxml-5.2.2-cp39-cp39-win32.whl", hash = "sha256:ac6540c9fff6e3813d29d0403ee7a81897f1d8ecc09a8ff84d2eea70ede1cdbf"}, - {file = "lxml-5.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:610b5c77428a50269f38a534057444c249976433f40f53e3b47e68349cca1425"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b537bd04d7ccd7c6350cdaaaad911f6312cbd61e6e6045542f781c7f8b2e99d2"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4820c02195d6dfb7b8508ff276752f6b2ff8b64ae5d13ebe02e7667e035000b9"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a09f6184f17a80897172863a655467da2b11151ec98ba8d7af89f17bf63dae"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:76acba4c66c47d27c8365e7c10b3d8016a7da83d3191d053a58382311a8bf4e1"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b128092c927eaf485928cec0c28f6b8bead277e28acf56800e972aa2c2abd7a2"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ae791f6bd43305aade8c0e22f816b34f3b72b6c820477aab4d18473a37e8090b"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a2f6a1bc2460e643785a2cde17293bd7a8f990884b822f7bca47bee0a82fc66b"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e8d351ff44c1638cb6e980623d517abd9f580d2e53bfcd18d8941c052a5a009"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bec4bd9133420c5c52d562469c754f27c5c9e36ee06abc169612c959bd7dbb07"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:55ce6b6d803890bd3cc89975fca9de1dff39729b43b73cb15ddd933b8bc20484"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8ab6a358d1286498d80fe67bd3d69fcbc7d1359b45b41e74c4a26964ca99c3f8"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:06668e39e1f3c065349c51ac27ae430719d7806c026fec462e5693b08b95696b"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9cd5323344d8ebb9fb5e96da5de5ad4ebab993bbf51674259dbe9d7a18049525"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:89feb82ca055af0fe797a2323ec9043b26bc371365847dbe83c7fd2e2f181c34"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e481bba1e11ba585fb06db666bfc23dbe181dbafc7b25776156120bf12e0d5a6"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d6c6ea6a11ca0ff9cd0390b885984ed31157c168565702959c25e2191674a14"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3d98de734abee23e61f6b8c2e08a88453ada7d6486dc7cdc82922a03968928db"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:69ab77a1373f1e7563e0fb5a29a8440367dec051da6c7405333699d07444f511"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:34e17913c431f5ae01d8658dbf792fdc457073dcdfbb31dc0cc6ab256e664a8d"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05f8757b03208c3f50097761be2dea0aba02e94f0dc7023ed73a7bb14ff11eb0"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a520b4f9974b0a0a6ed73c2154de57cdfd0c8800f4f15ab2b73238ffed0b36e"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5e097646944b66207023bc3c634827de858aebc226d5d4d6d16f0b77566ea182"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b5e4ef22ff25bfd4ede5f8fb30f7b24446345f3e79d9b7455aef2836437bc38a"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ff69a9a0b4b17d78170c73abe2ab12084bdf1691550c5629ad1fe7849433f324"}, - {file = "lxml-5.2.2.tar.gz", hash = "sha256:bb2dc4898180bea79863d5487e5f9c7c34297414bad54bcd0f0852aee9cfdb87"}, + {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd36439be765e2dde7660212b5275641edbc813e7b24668831a5c8ac91180656"}, + {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ae5fe5c4b525aa82b8076c1a59d642c17b6e8739ecf852522c6321852178119d"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:501d0d7e26b4d261fca8132854d845e4988097611ba2531408ec91cf3fd9d20a"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb66442c2546446944437df74379e9cf9e9db353e61301d1a0e26482f43f0dd8"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e41506fec7a7f9405b14aa2d5c8abbb4dbbd09d88f9496958b6d00cb4d45330"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f7d4a670107d75dfe5ad080bed6c341d18c4442f9378c9f58e5851e86eb79965"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41ce1f1e2c7755abfc7e759dc34d7d05fd221723ff822947132dc934d122fe22"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:44264ecae91b30e5633013fb66f6ddd05c006d3e0e884f75ce0b4755b3e3847b"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:3c174dc350d3ec52deb77f2faf05c439331d6ed5e702fc247ccb4e6b62d884b7"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:2dfab5fa6a28a0b60a20638dc48e6343c02ea9933e3279ccb132f555a62323d8"}, + {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b1c8c20847b9f34e98080da785bb2336ea982e7f913eed5809e5a3c872900f32"}, + {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:2c86bf781b12ba417f64f3422cfc302523ac9cd1d8ae8c0f92a1c66e56ef2e86"}, + {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c162b216070f280fa7da844531169be0baf9ccb17263cf5a8bf876fcd3117fa5"}, + {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:36aef61a1678cb778097b4a6eeae96a69875d51d1e8f4d4b491ab3cfb54b5a03"}, + {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f65e5120863c2b266dbcc927b306c5b78e502c71edf3295dfcb9501ec96e5fc7"}, + {file = "lxml-5.3.0-cp310-cp310-win32.whl", hash = "sha256:ef0c1fe22171dd7c7c27147f2e9c3e86f8bdf473fed75f16b0c2e84a5030ce80"}, + {file = "lxml-5.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:052d99051e77a4f3e8482c65014cf6372e61b0a6f4fe9edb98503bb5364cfee3"}, + {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:74bcb423462233bc5d6066e4e98b0264e7c1bed7541fff2f4e34fe6b21563c8b"}, + {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a3d819eb6f9b8677f57f9664265d0a10dd6551d227afb4af2b9cd7bdc2ccbf18"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b8f5db71b28b8c404956ddf79575ea77aa8b1538e8b2ef9ec877945b3f46442"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3406b63232fc7e9b8783ab0b765d7c59e7c59ff96759d8ef9632fca27c7ee4"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ecdd78ab768f844c7a1d4a03595038c166b609f6395e25af9b0f3f26ae1230f"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:168f2dfcfdedf611eb285efac1516c8454c8c99caf271dccda8943576b67552e"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa617107a410245b8660028a7483b68e7914304a6d4882b5ff3d2d3eb5948d8c"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:69959bd3167b993e6e710b99051265654133a98f20cec1d9b493b931942e9c16"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:bd96517ef76c8654446fc3db9242d019a1bb5fe8b751ba414765d59f99210b79"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:ab6dd83b970dc97c2d10bc71aa925b84788c7c05de30241b9e96f9b6d9ea3080"}, + {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eec1bb8cdbba2925bedc887bc0609a80e599c75b12d87ae42ac23fd199445654"}, + {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6a7095eeec6f89111d03dabfe5883a1fd54da319c94e0fb104ee8f23616b572d"}, + {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6f651ebd0b21ec65dfca93aa629610a0dbc13dbc13554f19b0113da2e61a4763"}, + {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f422a209d2455c56849442ae42f25dbaaba1c6c3f501d58761c619c7836642ec"}, + {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:62f7fdb0d1ed2065451f086519865b4c90aa19aed51081979ecd05a21eb4d1be"}, + {file = "lxml-5.3.0-cp311-cp311-win32.whl", hash = "sha256:c6379f35350b655fd817cd0d6cbeef7f265f3ae5fedb1caae2eb442bbeae9ab9"}, + {file = "lxml-5.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c52100e2c2dbb0649b90467935c4b0de5528833c76a35ea1a2691ec9f1ee7a1"}, + {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e99f5507401436fdcc85036a2e7dc2e28d962550afe1cbfc07c40e454256a859"}, + {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash 
= "sha256:384aacddf2e5813a36495233b64cb96b1949da72bef933918ba5c84e06af8f0e"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:874a216bf6afaf97c263b56371434e47e2c652d215788396f60477540298218f"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65ab5685d56914b9a2a34d67dd5488b83213d680b0c5d10b47f81da5a16b0b0e"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aac0bbd3e8dd2d9c45ceb82249e8bdd3ac99131a32b4d35c8af3cc9db1657179"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b369d3db3c22ed14c75ccd5af429086f166a19627e84a8fdade3f8f31426e52a"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24037349665434f375645fa9d1f5304800cec574d0310f618490c871fd902b3"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:62d172f358f33a26d6b41b28c170c63886742f5b6772a42b59b4f0fa10526cb1"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:c1f794c02903c2824fccce5b20c339a1a14b114e83b306ff11b597c5f71a1c8d"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:5d6a6972b93c426ace71e0be9a6f4b2cfae9b1baed2eed2006076a746692288c"}, + {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:3879cc6ce938ff4eb4900d901ed63555c778731a96365e53fadb36437a131a99"}, + {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:74068c601baff6ff021c70f0935b0c7bc528baa8ea210c202e03757c68c5a4ff"}, + {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ecd4ad8453ac17bc7ba3868371bffb46f628161ad0eefbd0a855d2c8c32dd81a"}, + {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7e2f58095acc211eb9d8b5771bf04df9ff37d6b87618d1cbf85f92399c98dae8"}, + {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e63601ad5cd8f860aa99d109889b5ac34de571c7ee902d6812d5d9ddcc77fa7d"}, + {file = "lxml-5.3.0-cp312-cp312-win32.whl", hash = "sha256:17e8d968d04a37c50ad9c456a286b525d78c4a1c15dd53aa46c1d8e06bf6fa30"}, + {file = "lxml-5.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:c1a69e58a6bb2de65902051d57fde951febad631a20a64572677a1052690482f"}, + {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c72e9563347c7395910de6a3100a4840a75a6f60e05af5e58566868d5eb2d6a"}, + {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e92ce66cd919d18d14b3856906a61d3f6b6a8500e0794142338da644260595cd"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d04f064bebdfef9240478f7a779e8c5dc32b8b7b0b2fc6a62e39b928d428e51"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c2fb570d7823c2bbaf8b419ba6e5662137f8166e364a8b2b91051a1fb40ab8b"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c120f43553ec759f8de1fee2f4794452b0946773299d44c36bfe18e83caf002"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:562e7494778a69086f0312ec9689f6b6ac1c6b65670ed7d0267e49f57ffa08c4"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:423b121f7e6fa514ba0c7918e56955a1d4470ed35faa03e3d9f0e3baa4c7e492"}, + {file = 
"lxml-5.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c00f323cc00576df6165cc9d21a4c21285fa6b9989c5c39830c3903dc4303ef3"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:1fdc9fae8dd4c763e8a31e7630afef517eab9f5d5d31a278df087f307bf601f4"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:658f2aa69d31e09699705949b5fc4719cbecbd4a97f9656a232e7d6c7be1a367"}, + {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1473427aff3d66a3fa2199004c3e601e6c4500ab86696edffdbc84954c72d832"}, + {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a87de7dd873bf9a792bf1e58b1c3887b9264036629a5bf2d2e6579fe8e73edff"}, + {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0d7b36afa46c97875303a94e8f3ad932bf78bace9e18e603f2085b652422edcd"}, + {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cf120cce539453ae086eacc0130a324e7026113510efa83ab42ef3fcfccac7fb"}, + {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:df5c7333167b9674aa8ae1d4008fa4bc17a313cc490b2cca27838bbdcc6bb15b"}, + {file = "lxml-5.3.0-cp313-cp313-win32.whl", hash = "sha256:c802e1c2ed9f0c06a65bc4ed0189d000ada8049312cfeab6ca635e39c9608957"}, + {file = "lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d"}, + {file = "lxml-5.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:8f0de2d390af441fe8b2c12626d103540b5d850d585b18fcada58d972b74a74e"}, + {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1afe0a8c353746e610bd9031a630a95bcfb1a720684c3f2b36c4710a0a96528f"}, + {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56b9861a71575f5795bde89256e7467ece3d339c9b43141dbdd54544566b3b94"}, + {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:9fb81d2824dff4f2e297a276297e9031f46d2682cafc484f49de182aa5e5df99"}, + {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2c226a06ecb8cdef28845ae976da407917542c5e6e75dcac7cc33eb04aaeb237"}, + {file = "lxml-5.3.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:7d3d1ca42870cdb6d0d29939630dbe48fa511c203724820fc0fd507b2fb46577"}, + {file = "lxml-5.3.0-cp36-cp36m-win32.whl", hash = "sha256:094cb601ba9f55296774c2d57ad68730daa0b13dc260e1f941b4d13678239e70"}, + {file = "lxml-5.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:eafa2c8658f4e560b098fe9fc54539f86528651f61849b22111a9b107d18910c"}, + {file = "lxml-5.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cb83f8a875b3d9b458cada4f880fa498646874ba4011dc974e071a0a84a1b033"}, + {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25f1b69d41656b05885aa185f5fdf822cb01a586d1b32739633679699f220391"}, + {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23e0553b8055600b3bf4a00b255ec5c92e1e4aebf8c2c09334f8368e8bd174d6"}, + {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ada35dd21dc6c039259596b358caab6b13f4db4d4a7f8665764d616daf9cc1d"}, + {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:81b4e48da4c69313192d8c8d4311e5d818b8be1afe68ee20f6385d0e96fc9512"}, + {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = 
"sha256:2bc9fd5ca4729af796f9f59cd8ff160fe06a474da40aca03fcc79655ddee1a8b"}, + {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07da23d7ee08577760f0a71d67a861019103e4812c87e2fab26b039054594cc5"}, + {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:ea2e2f6f801696ad7de8aec061044d6c8c0dd4037608c7cab38a9a4d316bfb11"}, + {file = "lxml-5.3.0-cp37-cp37m-win32.whl", hash = "sha256:5c54afdcbb0182d06836cc3d1be921e540be3ebdf8b8a51ee3ef987537455f84"}, + {file = "lxml-5.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f2901429da1e645ce548bf9171784c0f74f0718c3f6150ce166be39e4dd66c3e"}, + {file = "lxml-5.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c56a1d43b2f9ee4786e4658c7903f05da35b923fb53c11025712562d5cc02753"}, + {file = "lxml-5.3.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ee8c39582d2652dcd516d1b879451500f8db3fe3607ce45d7c5957ab2596040"}, + {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdf3a3059611f7585a78ee10399a15566356116a4288380921a4b598d807a22"}, + {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:146173654d79eb1fc97498b4280c1d3e1e5d58c398fa530905c9ea50ea849b22"}, + {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0a7056921edbdd7560746f4221dca89bb7a3fe457d3d74267995253f46343f15"}, + {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:9e4b47ac0f5e749cfc618efdf4726269441014ae1d5583e047b452a32e221920"}, + {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f914c03e6a31deb632e2daa881fe198461f4d06e57ac3d0e05bbcab8eae01945"}, + {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:213261f168c5e1d9b7535a67e68b1f59f92398dd17a56d934550837143f79c42"}, + {file = "lxml-5.3.0-cp38-cp38-win32.whl", hash = "sha256:218c1b2e17a710e363855594230f44060e2025b05c80d1f0661258142b2add2e"}, + {file = "lxml-5.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:315f9542011b2c4e1d280e4a20ddcca1761993dda3afc7a73b01235f8641e903"}, + {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1ffc23010330c2ab67fac02781df60998ca8fe759e8efde6f8b756a20599c5de"}, + {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2b3778cb38212f52fac9fe913017deea2fdf4eb1a4f8e4cfc6b009a13a6d3fcc"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b0c7a688944891086ba192e21c5229dea54382f4836a209ff8d0a660fac06be"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:747a3d3e98e24597981ca0be0fd922aebd471fa99d0043a3842d00cdcad7ad6a"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86a6b24b19eaebc448dc56b87c4865527855145d851f9fc3891673ff97950540"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b11a5d918a6216e521c715b02749240fb07ae5a1fefd4b7bf12f833bc8b4fe70"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68b87753c784d6acb8a25b05cb526c3406913c9d988d51f80adecc2b0775d6aa"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:109fa6fede314cc50eed29e6e56c540075e63d922455346f11e4d7a036d2b8cf"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:02ced472497b8362c8e902ade23e3300479f4f43e45f4105c85ef43b8db85229"}, + 
{file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:6b038cc86b285e4f9fea2ba5ee76e89f21ed1ea898e287dc277a25884f3a7dfe"}, + {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:7437237c6a66b7ca341e868cda48be24b8701862757426852c9b3186de1da8a2"}, + {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7f41026c1d64043a36fda21d64c5026762d53a77043e73e94b71f0521939cc71"}, + {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:482c2f67761868f0108b1743098640fbb2a28a8e15bf3f47ada9fa59d9fe08c3"}, + {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:1483fd3358963cc5c1c9b122c80606a3a79ee0875bcac0204149fa09d6ff2727"}, + {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2dec2d1130a9cda5b904696cec33b2cfb451304ba9081eeda7f90f724097300a"}, + {file = "lxml-5.3.0-cp39-cp39-win32.whl", hash = "sha256:a0eabd0a81625049c5df745209dc7fcef6e2aea7793e5f003ba363610aa0a3ff"}, + {file = "lxml-5.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:89e043f1d9d341c52bf2af6d02e6adde62e0a46e6755d5eb60dc6e4f0b8aeca2"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7b1cd427cb0d5f7393c31b7496419da594fe600e6fdc4b105a54f82405e6626c"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51806cfe0279e06ed8500ce19479d757db42a30fd509940b1701be9c86a5ff9a"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee70d08fd60c9565ba8190f41a46a54096afa0eeb8f76bd66f2c25d3b1b83005"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:8dc2c0395bea8254d8daebc76dcf8eb3a95ec2a46fa6fae5eaccee366bfe02ce"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6ba0d3dcac281aad8a0e5b14c7ed6f9fa89c8612b47939fc94f80b16e2e9bc83"}, + {file = "lxml-5.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:6e91cf736959057f7aac7adfc83481e03615a8e8dd5758aa1d95ea69e8931dba"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:94d6c3782907b5e40e21cadf94b13b0842ac421192f26b84c45f13f3c9d5dc27"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c300306673aa0f3ed5ed9372b21867690a17dba38c68c44b287437c362ce486b"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78d9b952e07aed35fe2e1a7ad26e929595412db48535921c5013edc8aa4a35ce"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:01220dca0d066d1349bd6a1726856a78f7929f3878f7e2ee83c296c69495309e"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2d9b8d9177afaef80c53c0a9e30fa252ff3036fb1c6494d427c066a4ce6a282f"}, + {file = "lxml-5.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:20094fc3f21ea0a8669dc4c61ed7fa8263bd37d97d93b90f28fc613371e7a875"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ace2c2326a319a0bb8a8b0e5b570c764962e95818de9f259ce814ee666603f19"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92e67a0be1639c251d21e35fe74df6bcc40cba445c2cda7c4a967656733249e2"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd5350b55f9fecddc51385463a4f67a5da829bc741e38cf689f38ec9023f54ab"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = 
"sha256:4c1fefd7e3d00921c44dc9ca80a775af49698bbfd92ea84498e56acffd4c5469"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:71a8dd38fbd2f2319136d4ae855a7078c69c9a38ae06e0c17c73fd70fc6caad8"}, + {file = "lxml-5.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:97acf1e1fd66ab53dacd2c35b319d7e548380c2e9e8c54525c6e76d21b1ae3b1"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:68934b242c51eb02907c5b81d138cb977b2129a0a75a8f8b60b01cb8586c7b21"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b710bc2b8292966b23a6a0121f7a6c51d45d2347edcc75f016ac123b8054d3f2"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18feb4b93302091b1541221196a2155aa296c363fd233814fa11e181adebc52f"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3eb44520c4724c2e1a57c0af33a379eee41792595023f367ba3952a2d96c2aab"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:609251a0ca4770e5a8768ff902aa02bf636339c5a93f9349b48eb1f606f7f3e9"}, + {file = "lxml-5.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:516f491c834eb320d6c843156440fe7fc0d50b33e44387fcec5b02f0bc118a4c"}, + {file = "lxml-5.3.0.tar.gz", hash = "sha256:4e109ca30d1edec1ac60cdbe341905dc3b8f55b16855e03a54aaf59e51ec8c6f"}, ] [package.dependencies] @@ -2608,17 +2736,17 @@ cssselect = ["cssselect (>=0.7)"] html-clean = ["lxml-html-clean"] html5 = ["html5lib"] htmlsoup = ["BeautifulSoup4"] -source = ["Cython (>=3.0.10)"] +source = ["Cython (>=3.0.11)"] [[package]] name = "lxml-html-clean" -version = "0.1.1" +version = "0.2.0" description = "HTML cleaner from lxml project" optional = true python-versions = "*" files = [ - {file = "lxml_html_clean-0.1.1-py3-none-any.whl", hash = "sha256:58c04176593c9caf72ec92e033d2f38859e918b3eff0cc0f8051ad27dc2ab8ef"}, - {file = "lxml_html_clean-0.1.1.tar.gz", hash = "sha256:8a644ed01dbbe132fabddb9467f077f6dad12a1d4f3a6a553e280f3815fa46df"}, + {file = "lxml_html_clean-0.2.0-py3-none-any.whl", hash = "sha256:80bdc730b288b8e68f0bf86b99f4bbef129c5ec59b694c6681422be4c1eeb3c5"}, + {file = "lxml_html_clean-0.2.0.tar.gz", hash = "sha256:47c323f39d95d4cbf4956da62929c89a79313074467efaa4821013c97bf95628"}, ] [package.dependencies] @@ -2943,13 +3071,13 @@ mkdocs = ">=1.0.3" [[package]] name = "mkdocs-material" -version = "9.5.29" +version = "9.5.31" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.5.29-py3-none-any.whl", hash = "sha256:afc1f508e2662ded95f0a35a329e8a5acd73ee88ca07ba73836eb6fcdae5d8b4"}, - {file = "mkdocs_material-9.5.29.tar.gz", hash = "sha256:3e977598ec15a4ddad5c4dfc9e08edab6023edb51e88f0729bd27be77e3d322a"}, + {file = "mkdocs_material-9.5.31-py3-none-any.whl", hash = "sha256:1b1f49066fdb3824c1e96d6bacd2d4375de4ac74580b47e79ff44c4d835c5fcb"}, + {file = "mkdocs_material-9.5.31.tar.gz", hash = "sha256:31833ec664772669f5856f4f276bf3fdf0e642a445e64491eda459249c3a1ca8"}, ] [package.dependencies] @@ -2997,23 +3125,25 @@ mkdocs = ">=1.2" [[package]] name = "mkdocstrings" -version = "0.23.0" +version = "0.25.2" description = "Automatic documentation from sources, for MkDocs." 
optional = false python-versions = ">=3.8" files = [ - {file = "mkdocstrings-0.23.0-py3-none-any.whl", hash = "sha256:051fa4014dfcd9ed90254ae91de2dbb4f24e166347dae7be9a997fe16316c65e"}, - {file = "mkdocstrings-0.23.0.tar.gz", hash = "sha256:d9c6a37ffbe7c14a7a54ef1258c70b8d394e6a33a1c80832bce40b9567138d1c"}, + {file = "mkdocstrings-0.25.2-py3-none-any.whl", hash = "sha256:9e2cda5e2e12db8bb98d21e3410f3f27f8faab685a24b03b06ba7daa5b92abfc"}, + {file = "mkdocstrings-0.25.2.tar.gz", hash = "sha256:5cf57ad7f61e8be3111a2458b4e49c2029c9cb35525393b179f9c916ca8042dc"}, ] [package.dependencies] +click = ">=7.0" importlib-metadata = {version = ">=4.6", markers = "python_version < \"3.10\""} Jinja2 = ">=2.11.1" Markdown = ">=3.3" MarkupSafe = ">=1.1" -mkdocs = ">=1.2" +mkdocs = ">=1.4" mkdocs-autorefs = ">=0.3.1" mkdocstrings-python = {version = ">=0.5.2", optional = true, markers = "extra == \"python\""} +platformdirs = ">=2.2.0" pymdown-extensions = ">=6.3" typing-extensions = {version = ">=4.1", markers = "python_version < \"3.10\""} @@ -3037,24 +3167,6 @@ files = [ griffe = ">=0.37" mkdocstrings = ">=0.20" -[[package]] -name = "mkl" -version = "2021.4.0" -description = "Intel® oneAPI Math Kernel Library" -optional = true -python-versions = "*" -files = [ - {file = "mkl-2021.4.0-py2.py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.whl", hash = "sha256:67460f5cd7e30e405b54d70d1ed3ca78118370b65f7327d495e9c8847705e2fb"}, - {file = "mkl-2021.4.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:636d07d90e68ccc9630c654d47ce9fdeb036bb46e2b193b3a9ac8cfea683cce5"}, - {file = "mkl-2021.4.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:398dbf2b0d12acaf54117a5210e8f191827f373d362d796091d161f610c1ebfb"}, - {file = "mkl-2021.4.0-py2.py3-none-win32.whl", hash = "sha256:439c640b269a5668134e3dcbcea4350459c4a8bc46469669b2d67e07e3d330e8"}, - {file = "mkl-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:ceef3cafce4c009dd25f65d7ad0d833a0fbadc3d8903991ec92351fe5de1e718"}, -] - -[package.dependencies] -intel-openmp = "==2021.*" -tbb = "==2021.*" - [[package]] name = "mongomock" version = "4.1.2" @@ -3072,13 +3184,13 @@ sentinels = "*" [[package]] name = "more-itertools" -version = "10.3.0" +version = "10.4.0" description = "More routines for operating on iterables, beyond itertools" optional = false python-versions = ">=3.8" files = [ - {file = "more-itertools-10.3.0.tar.gz", hash = "sha256:e5d93ef411224fbcef366a6e8ddc4c5781bc6359d43412a65dd5964e46111463"}, - {file = "more_itertools-10.3.0-py3-none-any.whl", hash = "sha256:ea6a02e24a9161e51faad17a8782b92a0df82c12c1c8886fec7f0c3fa1a1b320"}, + {file = "more-itertools-10.4.0.tar.gz", hash = "sha256:fe0e63c4ab068eac62410ab05cccca2dc71ec44ba8ef29916a0090df061cf923"}, + {file = "more_itertools-10.4.0-py3-none-any.whl", hash = "sha256:0f7d9f83a0a8dcfa8a2694a770590d98a67ea943e3d9f5298309a484758c4e27"}, ] [[package]] @@ -3246,13 +3358,13 @@ files = [ [[package]] name = "mypy-boto3-bedrock" -version = "1.34.143" -description = "Type annotations for boto3.Bedrock 1.34.143 service generated with mypy-boto3-builder 7.25.0" +version = "1.34.152" +description = "Type annotations for boto3.Bedrock 1.34.152 service generated with mypy-boto3-builder 7.25.0" optional = false python-versions = ">=3.8" files = [ - {file = "mypy_boto3_bedrock-1.34.143-py3-none-any.whl", hash = "sha256:5cab5de36736aa8a452f9aeb02ef51548814f1b1e3dfd46d4479d7dc94755c43"}, - {file = "mypy_boto3_bedrock-1.34.143.tar.gz", hash = "sha256:df2e02860c64f8b8df90daed08d4f36817f36e830878cb69c1ec3dbc6ed35cf6"}, + 
{file = "mypy_boto3_bedrock-1.34.152-py3-none-any.whl", hash = "sha256:487959d0b9e753d7c62edbad5b62fb43f426724cd42b8353c98e35536b2c34d2"}, + {file = "mypy_boto3_bedrock-1.34.152.tar.gz", hash = "sha256:dd69fdec631887c68cd4066dfe2e175894ff93ee267ce9ac1fbe85790370bc3e"}, ] [package.dependencies] @@ -3260,13 +3372,13 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} [[package]] name = "mypy-boto3-iam" -version = "1.34.83" -description = "Type annotations for boto3.IAM 1.34.83 service generated with mypy-boto3-builder 7.23.2" +version = "1.34.152" +description = "Type annotations for boto3.IAM 1.34.152 service generated with mypy-boto3-builder 7.25.0" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-boto3-iam-1.34.83.tar.gz", hash = "sha256:7261315616757ebf7509df0e9b091d5942e470eb51c4b23c662a06873a9a8eca"}, - {file = "mypy_boto3_iam-1.34.83-py3-none-any.whl", hash = "sha256:dec66a98e29ec1e36178c24b8ff57aab6b91230df97557363bbd90ec06874768"}, + {file = "mypy_boto3_iam-1.34.152-py3-none-any.whl", hash = "sha256:2c97e2b05f8e2839921d57114b6a2fc6a990b84462fba3c72a04ab1e382cb0f9"}, + {file = "mypy_boto3_iam-1.34.152.tar.gz", hash = "sha256:e2a6094b53f5043b972765d24d86fce228ae224780b3e3b2a441f5ad8967e279"}, ] [package.dependencies] @@ -3288,13 +3400,13 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} [[package]] name = "mypy-boto3-s3" -version = "1.34.138" -description = "Type annotations for boto3.S3 1.34.138 service generated with mypy-boto3-builder 7.25.0" +version = "1.34.160" +description = "Type annotations for boto3.S3 1.34.160 service generated with mypy-boto3-builder 7.26.0" optional = false python-versions = ">=3.8" files = [ - {file = "mypy_boto3_s3-1.34.138-py3-none-any.whl", hash = "sha256:47ded5f06accc10ff9db9d55c85cca88e4f028ec360d7cfcea90377e525cba56"}, - {file = "mypy_boto3_s3-1.34.138.tar.gz", hash = "sha256:7f9770d1f0e9f6fc2ced96daf5c0792b2dbbb4a4f874f28200ff3c940d0815c3"}, + {file = "mypy_boto3_s3-1.34.160-py3-none-any.whl", hash = "sha256:ef6513c39cb97462b1f335dc112b48f612f3d9cfa474b5ce1be8941f28116082"}, + {file = "mypy_boto3_s3-1.34.160.tar.gz", hash = "sha256:19a6c09a634af79feb2f4aeb55b1bdefc41f7e8905572a035c9b5722cbc3f9f2"}, ] [package.dependencies] @@ -3302,13 +3414,13 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} [[package]] name = "mypy-boto3-sagemaker" -version = "1.34.145" -description = "Type annotations for boto3.SageMaker 1.34.145 service generated with mypy-boto3-builder 7.25.0" +version = "1.34.159" +description = "Type annotations for boto3.SageMaker 1.34.159 service generated with mypy-boto3-builder 7.25.3" optional = false python-versions = ">=3.8" files = [ - {file = "mypy_boto3_sagemaker-1.34.145-py3-none-any.whl", hash = "sha256:af9be506bc8788870878f4b332cfd8c2ec27cc2e133977fb7df1df9ac00ae78d"}, - {file = "mypy_boto3_sagemaker-1.34.145.tar.gz", hash = "sha256:ee2317d9297d1c28479a7c7362421a784c1aabf61c6aad3488140db20254ba85"}, + {file = "mypy_boto3_sagemaker-1.34.159-py3-none-any.whl", hash = "sha256:3a2f0582507d9f98d16d1ac411348833e063341b87204e94e7e89ec2ffbab1ba"}, + {file = "mypy_boto3_sagemaker-1.34.159.tar.gz", hash = "sha256:3c93353239170594947b8e912a4a7461a52f0bbba01432c308173cb3bff11c8b"}, ] [package.dependencies] @@ -3475,12 +3587,13 @@ files = [ [[package]] name = "nvidia-cudnn-cu12" -version = "8.9.2.26" +version = "9.1.0.70" description = "cuDNN runtime libraries" optional = true python-versions = ">=3" files = [ - {file = 
"nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl", hash = "sha256:5ccb288774fdfb07a7e7025ffec286971c06d8d7b4fb162525334616d7629ff9"}, + {file = "nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f"}, + {file = "nvidia_cudnn_cu12-9.1.0.70-py3-none-win_amd64.whl", hash = "sha256:6278562929433d68365a07a4a1546c237ba2849852c0d4b2262a486e805b977a"}, ] [package.dependencies] @@ -3551,13 +3664,13 @@ files = [ [[package]] name = "nvidia-nvjitlink-cu12" -version = "12.5.82" +version = "12.6.20" description = "Nvidia JIT LTO Library" optional = true python-versions = ">=3" files = [ - {file = "nvidia_nvjitlink_cu12-12.5.82-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f9b37bc5c8cf7509665cb6ada5aaa0ce65618f2332b7d3e78e9790511f111212"}, - {file = "nvidia_nvjitlink_cu12-12.5.82-py3-none-win_amd64.whl", hash = "sha256:e782564d705ff0bf61ac3e1bf730166da66dd2fe9012f111ede5fc49b64ae697"}, + {file = "nvidia_nvjitlink_cu12-12.6.20-py3-none-manylinux2014_x86_64.whl", hash = "sha256:562ab97ea2c23164823b2a89cb328d01d45cb99634b8c65fe7cd60d14562bd79"}, + {file = "nvidia_nvjitlink_cu12-12.6.20-py3-none-win_amd64.whl", hash = "sha256:ed3c43a17f37b0c922a919203d2d36cbef24d41cc3e6b625182f8b58203644f6"}, ] [[package]] @@ -3573,13 +3686,13 @@ files = [ [[package]] name = "ollama" -version = "0.3.0" +version = "0.3.1" description = "The official Python client for Ollama." optional = true python-versions = "<4.0,>=3.8" files = [ - {file = "ollama-0.3.0-py3-none-any.whl", hash = "sha256:cd7010c4e2a37d7f08f36cd35c4592b14f1ec0d1bf3df10342cd47963d81ad7a"}, - {file = "ollama-0.3.0.tar.gz", hash = "sha256:6ff493a2945ba76cdd6b7912a1cd79a45cfd9ba9120d14adeb63b2b5a7f353da"}, + {file = "ollama-0.3.1-py3-none-any.whl", hash = "sha256:db50034c73d6350349bdfba19c3f0d54a3cea73eb97b35f9d7419b2fc7206454"}, + {file = "ollama-0.3.1.tar.gz", hash = "sha256:032572fb494a4fba200c65013fe937a65382c846b5f358d9e8918ecbc9ac44b5"}, ] [package.dependencies] @@ -3587,23 +3700,24 @@ httpx = ">=0.27.0,<0.28.0" [[package]] name = "openai" -version = "1.37.0" +version = "1.40.6" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.37.0-py3-none-any.whl", hash = "sha256:a903245c0ecf622f2830024acdaa78683c70abb8e9d37a497b851670864c9f73"}, - {file = "openai-1.37.0.tar.gz", hash = "sha256:dc8197fc40ab9d431777b6620d962cc49f4544ffc3011f03ce0a805e6eb54adb"}, + {file = "openai-1.40.6-py3-none-any.whl", hash = "sha256:b36372124a779381a420a34dd96f762baa748b6bdfaf83a6b9f2745f72ccc1c5"}, + {file = "openai-1.40.6.tar.gz", hash = "sha256:2239232bcb7f4bd4ce8e02544b5769618582411cf399816d96686d1b6c1e5c8d"}, ] [package.dependencies] anyio = ">=3.5.0,<5" distro = ">=1.7.0,<2" httpx = ">=0.23.0,<1" +jiter = ">=0.4.0,<1" pydantic = ">=1.9.0,<3" sniffio = "*" tqdm = ">4" -typing-extensions = ">=4.7,<5" +typing-extensions = ">=4.11,<5" [package.extras] datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] @@ -3638,51 +3752,51 @@ kerberos = ["requests-kerberos"] [[package]] name = "opentelemetry-api" -version = "1.25.0" +version = "1.26.0" description = "OpenTelemetry Python API" optional = true python-versions = ">=3.8" files = [ - {file = "opentelemetry_api-1.25.0-py3-none-any.whl", hash = "sha256:757fa1aa020a0f8fa139f8959e53dec2051cc26b832e76fa839a6d76ecefd737"}, - {file = "opentelemetry_api-1.25.0.tar.gz", hash = 
"sha256:77c4985f62f2614e42ce77ee4c9da5fa5f0bc1e1821085e9a47533a9323ae869"}, + {file = "opentelemetry_api-1.26.0-py3-none-any.whl", hash = "sha256:7d7ea33adf2ceda2dd680b18b1677e4152000b37ca76e679da71ff103b943064"}, + {file = "opentelemetry_api-1.26.0.tar.gz", hash = "sha256:2bd639e4bed5b18486fef0b5a520aaffde5a18fc225e808a1ac4df363f43a1ce"}, ] [package.dependencies] deprecated = ">=1.2.6" -importlib-metadata = ">=6.0,<=7.1" +importlib-metadata = ">=6.0,<=8.0.0" [[package]] name = "opentelemetry-exporter-otlp-proto-common" -version = "1.25.0" +version = "1.26.0" description = "OpenTelemetry Protobuf encoding" optional = true python-versions = ">=3.8" files = [ - {file = "opentelemetry_exporter_otlp_proto_common-1.25.0-py3-none-any.whl", hash = "sha256:15637b7d580c2675f70246563363775b4e6de947871e01d0f4e3881d1848d693"}, - {file = "opentelemetry_exporter_otlp_proto_common-1.25.0.tar.gz", hash = "sha256:c93f4e30da4eee02bacd1e004eb82ce4da143a2f8e15b987a9f603e0a85407d3"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.26.0-py3-none-any.whl", hash = "sha256:ee4d8f8891a1b9c372abf8d109409e5b81947cf66423fd998e56880057afbc71"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.26.0.tar.gz", hash = "sha256:bdbe50e2e22a1c71acaa0c8ba6efaadd58882e5a5978737a44a4c4b10d304c92"}, ] [package.dependencies] -opentelemetry-proto = "1.25.0" +opentelemetry-proto = "1.26.0" [[package]] name = "opentelemetry-exporter-otlp-proto-http" -version = "1.25.0" +version = "1.26.0" description = "OpenTelemetry Collector Protobuf over HTTP Exporter" optional = true python-versions = ">=3.8" files = [ - {file = "opentelemetry_exporter_otlp_proto_http-1.25.0-py3-none-any.whl", hash = "sha256:2eca686ee11b27acd28198b3ea5e5863a53d1266b91cda47c839d95d5e0541a6"}, - {file = "opentelemetry_exporter_otlp_proto_http-1.25.0.tar.gz", hash = "sha256:9f8723859e37c75183ea7afa73a3542f01d0fd274a5b97487ea24cb683d7d684"}, + {file = "opentelemetry_exporter_otlp_proto_http-1.26.0-py3-none-any.whl", hash = "sha256:ee72a87c48ec977421b02f16c52ea8d884122470e0be573905237b540f4ee562"}, + {file = "opentelemetry_exporter_otlp_proto_http-1.26.0.tar.gz", hash = "sha256:5801ebbcf7b527377883e6cbbdda35ee712dc55114fff1e93dfee210be56c908"}, ] [package.dependencies] deprecated = ">=1.2.6" googleapis-common-protos = ">=1.52,<2.0" opentelemetry-api = ">=1.15,<2.0" -opentelemetry-exporter-otlp-proto-common = "1.25.0" -opentelemetry-proto = "1.25.0" -opentelemetry-sdk = ">=1.25.0,<1.26.0" +opentelemetry-exporter-otlp-proto-common = "1.26.0" +opentelemetry-proto = "1.26.0" +opentelemetry-sdk = ">=1.26.0,<1.27.0" requests = ">=2.7,<3.0" [[package]] @@ -3719,13 +3833,13 @@ wrapt = ">=1.0.0,<2.0.0" [[package]] name = "opentelemetry-proto" -version = "1.25.0" +version = "1.26.0" description = "OpenTelemetry Python Proto" optional = true python-versions = ">=3.8" files = [ - {file = "opentelemetry_proto-1.25.0-py3-none-any.whl", hash = "sha256:f07e3341c78d835d9b86665903b199893befa5e98866f63d22b00d0b7ca4972f"}, - {file = "opentelemetry_proto-1.25.0.tar.gz", hash = "sha256:35b6ef9dc4a9f7853ecc5006738ad40443701e52c26099e197895cbda8b815a3"}, + {file = "opentelemetry_proto-1.26.0-py3-none-any.whl", hash = "sha256:6c4d7b4d4d9c88543bcf8c28ae3f8f0448a753dc291c18c5390444c90b76a725"}, + {file = "opentelemetry_proto-1.26.0.tar.gz", hash = "sha256:c5c18796c0cab3751fc3b98dee53855835e90c0422924b484432ac852d93dc1e"}, ] [package.dependencies] @@ -3733,33 +3847,34 @@ protobuf = ">=3.19,<5.0" [[package]] name = "opentelemetry-sdk" -version = "1.25.0" +version = "1.26.0" description 
= "OpenTelemetry Python SDK" optional = true python-versions = ">=3.8" files = [ - {file = "opentelemetry_sdk-1.25.0-py3-none-any.whl", hash = "sha256:d97ff7ec4b351692e9d5a15af570c693b8715ad78b8aafbec5c7100fe966b4c9"}, - {file = "opentelemetry_sdk-1.25.0.tar.gz", hash = "sha256:ce7fc319c57707ef5bf8b74fb9f8ebdb8bfafbe11898410e0d2a761d08a98ec7"}, + {file = "opentelemetry_sdk-1.26.0-py3-none-any.whl", hash = "sha256:feb5056a84a88670c041ea0ded9921fca559efec03905dddeb3885525e0af897"}, + {file = "opentelemetry_sdk-1.26.0.tar.gz", hash = "sha256:c90d2868f8805619535c05562d699e2f4fb1f00dbd55a86dcefca4da6fa02f85"}, ] [package.dependencies] -opentelemetry-api = "1.25.0" -opentelemetry-semantic-conventions = "0.46b0" +opentelemetry-api = "1.26.0" +opentelemetry-semantic-conventions = "0.47b0" typing-extensions = ">=3.7.4" [[package]] name = "opentelemetry-semantic-conventions" -version = "0.46b0" +version = "0.47b0" description = "OpenTelemetry Semantic Conventions" optional = true python-versions = ">=3.8" files = [ - {file = "opentelemetry_semantic_conventions-0.46b0-py3-none-any.whl", hash = "sha256:6daef4ef9fa51d51855d9f8e0ccd3a1bd59e0e545abe99ac6203804e36ab3e07"}, - {file = "opentelemetry_semantic_conventions-0.46b0.tar.gz", hash = "sha256:fbc982ecbb6a6e90869b15c1673be90bd18c8a56ff1cffc0864e38e2edffaefa"}, + {file = "opentelemetry_semantic_conventions-0.47b0-py3-none-any.whl", hash = "sha256:4ff9d595b85a59c1c1413f02bba320ce7ea6bf9e2ead2b0913c4395c7bbc1063"}, + {file = "opentelemetry_semantic_conventions-0.47b0.tar.gz", hash = "sha256:a8d57999bbe3495ffd4d510de26a97dadc1dace53e0275001b2c1b2f67992a7e"}, ] [package.dependencies] -opentelemetry-api = "1.25.0" +deprecated = ">=1.2.6" +opentelemetry-api = "1.26.0" [[package]] name = "packaging" @@ -4020,18 +4135,18 @@ type = ["mypy (>=1.8)"] [[package]] name = "playwright" -version = "1.45.0" +version = "1.46.0" description = "A high-level API to automate web browsers" optional = true python-versions = ">=3.8" files = [ - {file = "playwright-1.45.0-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:7d49aee5907d8e72060f04bc299cb6851c2dc44cb227540ade89d7aa529e907a"}, - {file = "playwright-1.45.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:210c9f848820f58b5b5ed48047748620b780ca3acc3e2b7560dafb2bfdd6d90a"}, - {file = "playwright-1.45.0-py3-none-macosx_11_0_universal2.whl", hash = "sha256:13b5398831f5499580e819ddc996633446a93bf88029e89451e51da188e16ae3"}, - {file = "playwright-1.45.0-py3-none-manylinux1_x86_64.whl", hash = "sha256:0ba5a39f25fb9b9cf1bd48678f44536a29f6d83376329de2dee1567dac220afe"}, - {file = "playwright-1.45.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b09fa76614ba2926d45a4c0581f710c13652d5e32290ba6a1490fbafff7f0be8"}, - {file = "playwright-1.45.0-py3-none-win32.whl", hash = "sha256:97a7d53af89af54208b69c051046b462675fcf5b93f7fbfb7c0fa7f813424ee2"}, - {file = "playwright-1.45.0-py3-none-win_amd64.whl", hash = "sha256:701db496928429aec103739e48e3110806bd5cf49456cc95b89f28e1abda71da"}, + {file = "playwright-1.46.0-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:fa60b95c16f6ce954636229a6c9dd885485326bca52d5ba20d02c0bc731a2bbb"}, + {file = "playwright-1.46.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:73dcfc24834f4d004bc862ed0d74b4c1406793a8164734238ad035356fddc8ac"}, + {file = "playwright-1.46.0-py3-none-macosx_11_0_universal2.whl", hash = "sha256:f5acfec1dbdc84d02dc696a17a344227e66c91413eab2036428dab405f195b82"}, + {file = "playwright-1.46.0-py3-none-manylinux1_x86_64.whl", hash = 
"sha256:3b418509f45879f1403d070858657a39bd0b333b23d92c37355682b671726df9"}, + {file = "playwright-1.46.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23580f6a3f99757bb9779d29be37144cb9328cd9bafa178e6db5b3ab4b7faf4c"}, + {file = "playwright-1.46.0-py3-none-win32.whl", hash = "sha256:85f44dd32a23d02850f0ff4dafe51580e5199531fff5121a62489d9838707782"}, + {file = "playwright-1.46.0-py3-none-win_amd64.whl", hash = "sha256:f14a7fd7e24e954eec6ce61d787d499e41937ade811a0818e9a088aabe28ebb6"}, ] [package.dependencies] @@ -4085,13 +4200,13 @@ files = [ [[package]] name = "pre-commit" -version = "3.7.1" +version = "3.8.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." optional = false python-versions = ">=3.9" files = [ - {file = "pre_commit-3.7.1-py2.py3-none-any.whl", hash = "sha256:fae36fd1d7ad7d6a5a1c0b0d5adb2ed1a3bda5a21bf6c3e5372073d7a11cd4c5"}, - {file = "pre_commit-3.7.1.tar.gz", hash = "sha256:8ca3ad567bc78a4972a3f1a477e94a79d4597e8140a6e0b651c5e33899c3654a"}, + {file = "pre_commit-3.8.0-py2.py3-none-any.whl", hash = "sha256:9a90a53bf82fdd8778d58085faf8d83df56e40dfe18f45b19446e26bf1b3a63f"}, + {file = "pre_commit-3.8.0.tar.gz", hash = "sha256:8bb6494d4a20423842e198980c9ecf9f96607a07ea29549e180eef9ae80fe7af"}, ] [package.dependencies] @@ -4101,6 +4216,26 @@ nodeenv = ">=0.11.1" pyyaml = ">=5.1" virtualenv = ">=20.10.0" +[[package]] +name = "primp" +version = "0.6.0" +description = "HTTP client that can impersonate web browsers, mimicking their headers and `TLS/JA3/JA4/HTTP2` fingerprints" +optional = true +python-versions = ">=3.8" +files = [ + {file = "primp-0.6.0-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:e74173d87b232d276b0a64ce21717b993e2460677cb81d3289965a80f6675abd"}, + {file = "primp-0.6.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:e7c2f0443afd3064efbe267e0515657173531bc89ee36ea21794edf08d931be9"}, + {file = "primp-0.6.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6e16aaab98bddea6fef90337e170fd2c232d399fb75d36dda53dd9f956cd9eeb"}, + {file = "primp-0.6.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:f36ff625e1531f051bdad68d4cd8832ba8c7f1dec7b430af8206562b37656a28"}, + {file = "primp-0.6.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e941355ca729eb277613f9c5ee0b0f4273d3df9440a177dccc1780ad0c752858"}, + {file = "primp-0.6.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:74585b3fb460dd13379b36d6c7ebc1efb24e92d21696476b93c4b97a2638f4ac"}, + {file = "primp-0.6.0-cp38-abi3-win_amd64.whl", hash = "sha256:cf391210e28397ddbbbea7216c8fe72caa61a3a407e7faba351825ecf6ff3bec"}, + {file = "primp-0.6.0.tar.gz", hash = "sha256:25ef0a1be619a621f197ed9d9768cedde2b4f1e8cd0764b609b06aa5773b4de9"}, +] + +[package.extras] +dev = ["certifi", "pytest (>=8.1.1)"] + [[package]] name = "proto-plus" version = "1.24.0" @@ -4120,22 +4255,22 @@ testing = ["google-api-core (>=1.31.5)"] [[package]] name = "protobuf" -version = "4.25.3" +version = "4.25.4" description = "" optional = true python-versions = ">=3.8" files = [ - {file = "protobuf-4.25.3-cp310-abi3-win32.whl", hash = "sha256:d4198877797a83cbfe9bffa3803602bbe1625dc30d8a097365dbc762e5790faa"}, - {file = "protobuf-4.25.3-cp310-abi3-win_amd64.whl", hash = "sha256:209ba4cc916bab46f64e56b85b090607a676f66b473e6b762e6f1d9d591eb2e8"}, - {file = "protobuf-4.25.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c"}, - {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_aarch64.whl", hash = 
"sha256:e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019"}, - {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d"}, - {file = "protobuf-4.25.3-cp38-cp38-win32.whl", hash = "sha256:f4f118245c4a087776e0a8408be33cf09f6c547442c00395fbfb116fac2f8ac2"}, - {file = "protobuf-4.25.3-cp38-cp38-win_amd64.whl", hash = "sha256:c053062984e61144385022e53678fbded7aea14ebb3e0305ae3592fb219ccfa4"}, - {file = "protobuf-4.25.3-cp39-cp39-win32.whl", hash = "sha256:19b270aeaa0099f16d3ca02628546b8baefe2955bbe23224aaf856134eccf1e4"}, - {file = "protobuf-4.25.3-cp39-cp39-win_amd64.whl", hash = "sha256:e3c97a1555fd6388f857770ff8b9703083de6bf1f9274a002a332d65fbb56c8c"}, - {file = "protobuf-4.25.3-py3-none-any.whl", hash = "sha256:f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9"}, - {file = "protobuf-4.25.3.tar.gz", hash = "sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c"}, + {file = "protobuf-4.25.4-cp310-abi3-win32.whl", hash = "sha256:db9fd45183e1a67722cafa5c1da3e85c6492a5383f127c86c4c4aa4845867dc4"}, + {file = "protobuf-4.25.4-cp310-abi3-win_amd64.whl", hash = "sha256:ba3d8504116a921af46499471c63a85260c1a5fc23333154a427a310e015d26d"}, + {file = "protobuf-4.25.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:eecd41bfc0e4b1bd3fa7909ed93dd14dd5567b98c941d6c1ad08fdcab3d6884b"}, + {file = "protobuf-4.25.4-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:4c8a70fdcb995dcf6c8966cfa3a29101916f7225e9afe3ced4395359955d3835"}, + {file = "protobuf-4.25.4-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:3319e073562e2515c6ddc643eb92ce20809f5d8f10fead3332f71c63be6a7040"}, + {file = "protobuf-4.25.4-cp38-cp38-win32.whl", hash = "sha256:7e372cbbda66a63ebca18f8ffaa6948455dfecc4e9c1029312f6c2edcd86c4e1"}, + {file = "protobuf-4.25.4-cp38-cp38-win_amd64.whl", hash = "sha256:051e97ce9fa6067a4546e75cb14f90cf0232dcb3e3d508c448b8d0e4265b61c1"}, + {file = "protobuf-4.25.4-cp39-cp39-win32.whl", hash = "sha256:90bf6fd378494eb698805bbbe7afe6c5d12c8e17fca817a646cd6a1818c696ca"}, + {file = "protobuf-4.25.4-cp39-cp39-win_amd64.whl", hash = "sha256:ac79a48d6b99dfed2729ccccee547b34a1d3d63289c71cef056653a846a2240f"}, + {file = "protobuf-4.25.4-py3-none-any.whl", hash = "sha256:bfbebc1c8e4793cfd58589acfb8a1026be0003e852b9da7db5a4285bde996978"}, + {file = "protobuf-4.25.4.tar.gz", hash = "sha256:0dc4a62cc4052a036ee2204d26fe4d835c62827c855c8a03f29fe6da146b380d"}, ] [[package]] @@ -4478,30 +4613,30 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pyjwt" -version = "2.8.0" +version = "2.9.0" description = "JSON Web Token implementation in Python" optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "PyJWT-2.8.0-py3-none-any.whl", hash = "sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320"}, - {file = "PyJWT-2.8.0.tar.gz", hash = "sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de"}, + {file = "PyJWT-2.9.0-py3-none-any.whl", hash = "sha256:3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850"}, + {file = "pyjwt-2.9.0.tar.gz", hash = "sha256:7e1e5b56cc735432a7369cbfa0efe50fa113ebecdc04ae6922deba8b84582d0c"}, ] [package.extras] crypto = ["cryptography (>=3.4.0)"] -dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] -docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", 
"zope.interface"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"] +docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] [[package]] name = "pymdown-extensions" -version = "10.8.1" +version = "10.9" description = "Extension pack for Python Markdown." optional = false python-versions = ">=3.8" files = [ - {file = "pymdown_extensions-10.8.1-py3-none-any.whl", hash = "sha256:f938326115884f48c6059c67377c46cf631c733ef3629b6eed1349989d1b30cb"}, - {file = "pymdown_extensions-10.8.1.tar.gz", hash = "sha256:3ab1db5c9e21728dabf75192d71471f8e50f216627e9a1fa9535ecb0231b9940"}, + {file = "pymdown_extensions-10.9-py3-none-any.whl", hash = "sha256:d323f7e90d83c86113ee78f3fe62fc9dee5f56b54d912660703ea1816fed5626"}, + {file = "pymdown_extensions-10.9.tar.gz", hash = "sha256:6ff740bcd99ec4172a938970d42b96128bdc9d4b9bcad72494f29921dc69b753"}, ] [package.dependencies] @@ -4662,35 +4797,15 @@ docs = ["myst_parser", "sphinx", "sphinx_rtd_theme"] full = ["Pillow (>=8.0.0)", "PyCryptodome", "cryptography"] image = ["Pillow (>=8.0.0)"] -[[package]] -name = "pyreqwest-impersonate" -version = "0.5.2" -description = "HTTP client that can impersonate web browsers, mimicking their headers and `TLS/JA3/JA4/HTTP2` fingerprints" -optional = true -python-versions = ">=3.8" -files = [ - {file = "pyreqwest_impersonate-0.5.2-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:ec8c9318190020bad3f12a62aedc5faaa92078c38cf48189e98cd692b82e7c28"}, - {file = "pyreqwest_impersonate-0.5.2-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:ca7fc399a672558de1d50b5beb5a153c7002adbe332edd7a752f1d19c35d7f7f"}, - {file = "pyreqwest_impersonate-0.5.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b453ab0a7f30a279151e88bd52449a01931307986d7c2a6a92a699d3ba58b66"}, - {file = "pyreqwest_impersonate-0.5.2-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9c78ab908cc10a195ddbfc60c18b205237233f76b4f56c28ebc1afe80210335"}, - {file = "pyreqwest_impersonate-0.5.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:31db4e6e491868c227a252ef3a8fa8b7accc306e741934f74ad3e6fb096e334c"}, - {file = "pyreqwest_impersonate-0.5.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3ce34743e3ce2656ba262a325103b96e1d4f301be5f3dd8cb21ee4eec4c6e1b7"}, - {file = "pyreqwest_impersonate-0.5.2-cp38-abi3-win_amd64.whl", hash = "sha256:e72b39a64c12a35a1689af558d62c67f94d14310043e404a90550e39ddd4af7c"}, - {file = "pyreqwest_impersonate-0.5.2.tar.gz", hash = "sha256:50a57c4b139788606b311757ddf36efd82ea1c952decea808b4196220bd43803"}, -] - -[package.extras] -dev = ["pytest (>=8.1.1)"] - [[package]] name = "pyright" -version = "1.1.372" +version = "1.1.376" description = "Command line wrapper for pyright" optional = false python-versions = ">=3.7" files = [ - {file = "pyright-1.1.372-py3-none-any.whl", hash = "sha256:25b15fb8967740f0949fd35b963777187f0a0404c0bd753cc966ec139f3eaa0b"}, - {file = "pyright-1.1.372.tar.gz", hash = "sha256:a9f5e0daa955daaa17e3d1ef76d3623e75f8afd5e37b437d3ff84d5b38c15420"}, + {file = "pyright-1.1.376-py3-none-any.whl", hash = "sha256:0f2473b12c15c46b3207f0eec224c3cea2bdc07cd45dd4a037687cbbca0fbeff"}, + {file = "pyright-1.1.376.tar.gz", hash = "sha256:bffd63b197cd0810395bb3245c06b01f95a85ddf6bfa0e5644ed69c841e954dd"}, ] [package.dependencies] @@ -4702,13 +4817,13 @@ dev = ["twine (>=3.4.1)"] [[package]] name = "pytest" -version = 
"8.3.1" +version = "8.3.2" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.3.1-py3-none-any.whl", hash = "sha256:e9600ccf4f563976e2c99fa02c7624ab938296551f280835ee6516df8bc4ae8c"}, - {file = "pytest-8.3.1.tar.gz", hash = "sha256:7e8e5c5abd6e93cb1cc151f23e57adc31fcf8cfd2a3ff2da63e23f732de35db6"}, + {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, + {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, ] [package.dependencies] @@ -4860,72 +4975,75 @@ files = [ [[package]] name = "pywin32-ctypes" -version = "0.2.2" +version = "0.2.3" description = "A (partial) reimplementation of pywin32 using ctypes/cffi" optional = false python-versions = ">=3.6" files = [ - {file = "pywin32-ctypes-0.2.2.tar.gz", hash = "sha256:3426e063bdd5fd4df74a14fa3cf80a0b42845a87e1d1e81f6549f9daec593a60"}, - {file = "pywin32_ctypes-0.2.2-py3-none-any.whl", hash = "sha256:bf490a1a709baf35d688fe0ecf980ed4de11d2b3e37b51e5442587a75d9957e7"}, + {file = "pywin32-ctypes-0.2.3.tar.gz", hash = "sha256:d162dc04946d704503b2edc4d55f3dba5c1d539ead017afa00142c38b9885755"}, + {file = "pywin32_ctypes-0.2.3-py3-none-any.whl", hash = "sha256:8a1513379d709975552d202d942d9837758905c8d01eb82b8bcc30918929e7b8"}, ] [[package]] name = "pyyaml" -version = "6.0.1" +version = "6.0.2" description = "YAML parser and emitter for Python" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = 
"PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = 
"PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = 
"sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] [[package]] @@ -4944,13 +5062,13 @@ pyyaml = "*" [[package]] name = "qdrant-client" -version = "1.10.1" +version = "1.11.0" description = "Client library for the Qdrant vector search engine" optional = true python-versions = ">=3.8" files = [ - {file = "qdrant_client-1.10.1-py3-none-any.whl", hash = "sha256:b9fb8fe50dd168d92b2998be7c6135d5a229b3a3258ad158cc69c8adf9ff1810"}, - {file = "qdrant_client-1.10.1.tar.gz", hash = "sha256:2284c8c5bb1defb0d9dbacb07d16f344972f395f4f2ed062318476a7951fd84c"}, + {file = "qdrant_client-1.11.0-py3-none-any.whl", hash = "sha256:1f574ccebb91c0bc8a620c9a41a5a010084fbc4d8c6f1cd0ab7b2eeb97336fc0"}, + {file = "qdrant_client-1.11.0.tar.gz", hash = 
"sha256:7c1d4d7a96cfd1ee0cde2a21c607e9df86bcca795ad8d1fd274d295ab64b8458"}, ] [package.dependencies] @@ -4966,8 +5084,8 @@ pydantic = ">=1.10.8" urllib3 = ">=1.26.14,<3" [package.extras] -fastembed = ["fastembed (==0.2.7)"] -fastembed-gpu = ["fastembed-gpu (==0.2.7)"] +fastembed = ["fastembed (==0.3.4)"] +fastembed-gpu = ["fastembed-gpu (==0.3.4)"] [[package]] name = "readme-renderer" @@ -5008,90 +5126,90 @@ ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)" [[package]] name = "regex" -version = "2024.5.15" +version = "2024.7.24" description = "Alternative regular expression module, to replace re." optional = false python-versions = ">=3.8" files = [ - {file = "regex-2024.5.15-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a81e3cfbae20378d75185171587cbf756015ccb14840702944f014e0d93ea09f"}, - {file = "regex-2024.5.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7b59138b219ffa8979013be7bc85bb60c6f7b7575df3d56dc1e403a438c7a3f6"}, - {file = "regex-2024.5.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0bd000c6e266927cb7a1bc39d55be95c4b4f65c5be53e659537537e019232b1"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5eaa7ddaf517aa095fa8da0b5015c44d03da83f5bd49c87961e3c997daed0de7"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba68168daedb2c0bab7fd7e00ced5ba90aebf91024dea3c88ad5063c2a562cca"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6e8d717bca3a6e2064fc3a08df5cbe366369f4b052dcd21b7416e6d71620dca1"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1337b7dbef9b2f71121cdbf1e97e40de33ff114801263b275aafd75303bd62b5"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9ebd0a36102fcad2f03696e8af4ae682793a5d30b46c647eaf280d6cfb32796"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9efa1a32ad3a3ea112224897cdaeb6aa00381627f567179c0314f7b65d354c62"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1595f2d10dff3d805e054ebdc41c124753631b6a471b976963c7b28543cf13b0"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b802512f3e1f480f41ab5f2cfc0e2f761f08a1f41092d6718868082fc0d27143"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:a0981022dccabca811e8171f913de05720590c915b033b7e601f35ce4ea7019f"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:19068a6a79cf99a19ccefa44610491e9ca02c2be3305c7760d3831d38a467a6f"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1b5269484f6126eee5e687785e83c6b60aad7663dafe842b34691157e5083e53"}, - {file = "regex-2024.5.15-cp310-cp310-win32.whl", hash = "sha256:ada150c5adfa8fbcbf321c30c751dc67d2f12f15bd183ffe4ec7cde351d945b3"}, - {file = "regex-2024.5.15-cp310-cp310-win_amd64.whl", hash = "sha256:ac394ff680fc46b97487941f5e6ae49a9f30ea41c6c6804832063f14b2a5a145"}, - {file = "regex-2024.5.15-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f5b1dff3ad008dccf18e652283f5e5339d70bf8ba7c98bf848ac33db10f7bc7a"}, - {file = "regex-2024.5.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c6a2b494a76983df8e3d3feea9b9ffdd558b247e60b92f877f93a1ff43d26656"}, - {file = 
"regex-2024.5.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a32b96f15c8ab2e7d27655969a23895eb799de3665fa94349f3b2fbfd547236f"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10002e86e6068d9e1c91eae8295ef690f02f913c57db120b58fdd35a6bb1af35"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec54d5afa89c19c6dd8541a133be51ee1017a38b412b1321ccb8d6ddbeb4cf7d"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10e4ce0dca9ae7a66e6089bb29355d4432caed736acae36fef0fdd7879f0b0cb"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e507ff1e74373c4d3038195fdd2af30d297b4f0950eeda6f515ae3d84a1770f"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1f059a4d795e646e1c37665b9d06062c62d0e8cc3c511fe01315973a6542e40"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0721931ad5fe0dda45d07f9820b90b2148ccdd8e45bb9e9b42a146cb4f695649"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:833616ddc75ad595dee848ad984d067f2f31be645d603e4d158bba656bbf516c"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:287eb7f54fc81546346207c533ad3c2c51a8d61075127d7f6d79aaf96cdee890"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:19dfb1c504781a136a80ecd1fff9f16dddf5bb43cec6871778c8a907a085bb3d"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:119af6e56dce35e8dfb5222573b50c89e5508d94d55713c75126b753f834de68"}, - {file = "regex-2024.5.15-cp311-cp311-win32.whl", hash = "sha256:1c1c174d6ec38d6c8a7504087358ce9213d4332f6293a94fbf5249992ba54efa"}, - {file = "regex-2024.5.15-cp311-cp311-win_amd64.whl", hash = "sha256:9e717956dcfd656f5055cc70996ee2cc82ac5149517fc8e1b60261b907740201"}, - {file = "regex-2024.5.15-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:632b01153e5248c134007209b5c6348a544ce96c46005d8456de1d552455b014"}, - {file = "regex-2024.5.15-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e64198f6b856d48192bf921421fdd8ad8eb35e179086e99e99f711957ffedd6e"}, - {file = "regex-2024.5.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68811ab14087b2f6e0fc0c2bae9ad689ea3584cad6917fc57be6a48bbd012c49"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ec0c2fea1e886a19c3bee0cd19d862b3aa75dcdfb42ebe8ed30708df64687a"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d0c0c0003c10f54a591d220997dd27d953cd9ccc1a7294b40a4be5312be8797b"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2431b9e263af1953c55abbd3e2efca67ca80a3de8a0437cb58e2421f8184717a"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a605586358893b483976cffc1723fb0f83e526e8f14c6e6614e75919d9862cf"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:391d7f7f1e409d192dba8bcd42d3e4cf9e598f3979cdaed6ab11288da88cb9f2"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9ff11639a8d98969c863d4617595eb5425fd12f7c5ef6621a4b74b71ed8726d5"}, - {file = 
"regex-2024.5.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4eee78a04e6c67e8391edd4dad3279828dd66ac4b79570ec998e2155d2e59fd5"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8fe45aa3f4aa57faabbc9cb46a93363edd6197cbc43523daea044e9ff2fea83e"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:d0a3d8d6acf0c78a1fff0e210d224b821081330b8524e3e2bc5a68ef6ab5803d"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c486b4106066d502495b3025a0a7251bf37ea9540433940a23419461ab9f2a80"}, - {file = "regex-2024.5.15-cp312-cp312-win32.whl", hash = "sha256:c49e15eac7c149f3670b3e27f1f28a2c1ddeccd3a2812cba953e01be2ab9b5fe"}, - {file = "regex-2024.5.15-cp312-cp312-win_amd64.whl", hash = "sha256:673b5a6da4557b975c6c90198588181029c60793835ce02f497ea817ff647cb2"}, - {file = "regex-2024.5.15-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:87e2a9c29e672fc65523fb47a90d429b70ef72b901b4e4b1bd42387caf0d6835"}, - {file = "regex-2024.5.15-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c3bea0ba8b73b71b37ac833a7f3fd53825924165da6a924aec78c13032f20850"}, - {file = "regex-2024.5.15-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bfc4f82cabe54f1e7f206fd3d30fda143f84a63fe7d64a81558d6e5f2e5aaba9"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5bb9425fe881d578aeca0b2b4b3d314ec88738706f66f219c194d67179337cb"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64c65783e96e563103d641760664125e91bd85d8e49566ee560ded4da0d3e704"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cf2430df4148b08fb4324b848672514b1385ae3807651f3567871f130a728cc3"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5397de3219a8b08ae9540c48f602996aa6b0b65d5a61683e233af8605c42b0f2"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:455705d34b4154a80ead722f4f185b04c4237e8e8e33f265cd0798d0e44825fa"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b2b6f1b3bb6f640c1a92be3bbfbcb18657b125b99ecf141fb3310b5282c7d4ed"}, - {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3ad070b823ca5890cab606c940522d05d3d22395d432f4aaaf9d5b1653e47ced"}, - {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5b5467acbfc153847d5adb21e21e29847bcb5870e65c94c9206d20eb4e99a384"}, - {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e6662686aeb633ad65be2a42b4cb00178b3fbf7b91878f9446075c404ada552f"}, - {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:2b4c884767504c0e2401babe8b5b7aea9148680d2e157fa28f01529d1f7fcf67"}, - {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3cd7874d57f13bf70078f1ff02b8b0aa48d5b9ed25fc48547516c6aba36f5741"}, - {file = "regex-2024.5.15-cp38-cp38-win32.whl", hash = "sha256:e4682f5ba31f475d58884045c1a97a860a007d44938c4c0895f41d64481edbc9"}, - {file = "regex-2024.5.15-cp38-cp38-win_amd64.whl", hash = "sha256:d99ceffa25ac45d150e30bd9ed14ec6039f2aad0ffa6bb87a5936f5782fc1569"}, - {file = "regex-2024.5.15-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:13cdaf31bed30a1e1c2453ef6015aa0983e1366fad2667657dbcac7b02f67133"}, - {file = 
"regex-2024.5.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cac27dcaa821ca271855a32188aa61d12decb6fe45ffe3e722401fe61e323cd1"}, - {file = "regex-2024.5.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7dbe2467273b875ea2de38ded4eba86cbcbc9a1a6d0aa11dcf7bd2e67859c435"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64f18a9a3513a99c4bef0e3efd4c4a5b11228b48aa80743be822b71e132ae4f5"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d347a741ea871c2e278fde6c48f85136c96b8659b632fb57a7d1ce1872547600"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1878b8301ed011704aea4c806a3cadbd76f84dece1ec09cc9e4dc934cfa5d4da"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4babf07ad476aaf7830d77000874d7611704a7fcf68c9c2ad151f5d94ae4bfc4"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:35cb514e137cb3488bce23352af3e12fb0dbedd1ee6e60da053c69fb1b29cc6c"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cdd09d47c0b2efee9378679f8510ee6955d329424c659ab3c5e3a6edea696294"}, - {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:72d7a99cd6b8f958e85fc6ca5b37c4303294954eac1376535b03c2a43eb72629"}, - {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:a094801d379ab20c2135529948cb84d417a2169b9bdceda2a36f5f10977ebc16"}, - {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c0c18345010870e58238790a6779a1219b4d97bd2e77e1140e8ee5d14df071aa"}, - {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:16093f563098448ff6b1fa68170e4acbef94e6b6a4e25e10eae8598bb1694b5d"}, - {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e38a7d4e8f633a33b4c7350fbd8bad3b70bf81439ac67ac38916c4a86b465456"}, - {file = "regex-2024.5.15-cp39-cp39-win32.whl", hash = "sha256:71a455a3c584a88f654b64feccc1e25876066c4f5ef26cd6dd711308aa538694"}, - {file = "regex-2024.5.15-cp39-cp39-win_amd64.whl", hash = "sha256:cab12877a9bdafde5500206d1020a584355a97884dfd388af3699e9137bf7388"}, - {file = "regex-2024.5.15.tar.gz", hash = "sha256:d3ee02d9e5f482cc8309134a91eeaacbdd2261ba111b0fef3748eeb4913e6a2c"}, + {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b0d3f567fafa0633aee87f08b9276c7062da9616931382993c03808bb68ce"}, + {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3426de3b91d1bc73249042742f45c2148803c111d1175b283270177fdf669024"}, + {file = "regex-2024.7.24-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f273674b445bcb6e4409bf8d1be67bc4b58e8b46fd0d560055d515b8830063cd"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23acc72f0f4e1a9e6e9843d6328177ae3074b4182167e34119ec7233dfeccf53"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65fd3d2e228cae024c411c5ccdffae4c315271eee4a8b839291f84f796b34eca"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c414cbda77dbf13c3bc88b073a1a9f375c7b0cb5e115e15d4b73ec3a2fbc6f59"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:bf7a89eef64b5455835f5ed30254ec19bf41f7541cd94f266ab7cbd463f00c41"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19c65b00d42804e3fbea9708f0937d157e53429a39b7c61253ff15670ff62cb5"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7a5486ca56c8869070a966321d5ab416ff0f83f30e0e2da1ab48815c8d165d46"}, + {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6f51f9556785e5a203713f5efd9c085b4a45aecd2a42573e2b5041881b588d1f"}, + {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a4997716674d36a82eab3e86f8fa77080a5d8d96a389a61ea1d0e3a94a582cf7"}, + {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c0abb5e4e8ce71a61d9446040c1e86d4e6d23f9097275c5bd49ed978755ff0fe"}, + {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:18300a1d78cf1290fa583cd8b7cde26ecb73e9f5916690cf9d42de569c89b1ce"}, + {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:416c0e4f56308f34cdb18c3f59849479dde5b19febdcd6e6fa4d04b6c31c9faa"}, + {file = "regex-2024.7.24-cp310-cp310-win32.whl", hash = "sha256:fb168b5924bef397b5ba13aabd8cf5df7d3d93f10218d7b925e360d436863f66"}, + {file = "regex-2024.7.24-cp310-cp310-win_amd64.whl", hash = "sha256:6b9fc7e9cc983e75e2518496ba1afc524227c163e43d706688a6bb9eca41617e"}, + {file = "regex-2024.7.24-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:382281306e3adaaa7b8b9ebbb3ffb43358a7bbf585fa93821300a418bb975281"}, + {file = "regex-2024.7.24-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4fdd1384619f406ad9037fe6b6eaa3de2749e2e12084abc80169e8e075377d3b"}, + {file = "regex-2024.7.24-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3d974d24edb231446f708c455fd08f94c41c1ff4f04bcf06e5f36df5ef50b95a"}, + {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2ec4419a3fe6cf8a4795752596dfe0adb4aea40d3683a132bae9c30b81e8d73"}, + {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb563dd3aea54c797adf513eeec819c4213d7dbfc311874eb4fd28d10f2ff0f2"}, + {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:45104baae8b9f67569f0f1dca5e1f1ed77a54ae1cd8b0b07aba89272710db61e"}, + {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:994448ee01864501912abf2bad9203bffc34158e80fe8bfb5b031f4f8e16da51"}, + {file = "regex-2024.7.24-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fac296f99283ac232d8125be932c5cd7644084a30748fda013028c815ba3364"}, + {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7e37e809b9303ec3a179085415cb5f418ecf65ec98cdfe34f6a078b46ef823ee"}, + {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:01b689e887f612610c869421241e075c02f2e3d1ae93a037cb14f88ab6a8934c"}, + {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f6442f0f0ff81775eaa5b05af8a0ffa1dda36e9cf6ec1e0d3d245e8564b684ce"}, + {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:871e3ab2838fbcb4e0865a6e01233975df3a15e6fce93b6f99d75cacbd9862d1"}, + {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:c918b7a1e26b4ab40409820ddccc5d49871a82329640f5005f73572d5eaa9b5e"}, + {file = "regex-2024.7.24-cp311-cp311-win32.whl", hash = "sha256:2dfbb8baf8ba2c2b9aa2807f44ed272f0913eeeba002478c4577b8d29cde215c"}, + {file = "regex-2024.7.24-cp311-cp311-win_amd64.whl", hash = "sha256:538d30cd96ed7d1416d3956f94d54e426a8daf7c14527f6e0d6d425fcb4cca52"}, + {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fe4ebef608553aff8deb845c7f4f1d0740ff76fa672c011cc0bacb2a00fbde86"}, + {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:74007a5b25b7a678459f06559504f1eec2f0f17bca218c9d56f6a0a12bfffdad"}, + {file = "regex-2024.7.24-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7df9ea48641da022c2a3c9c641650cd09f0cd15e8908bf931ad538f5ca7919c9"}, + {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a1141a1dcc32904c47f6846b040275c6e5de0bf73f17d7a409035d55b76f289"}, + {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80c811cfcb5c331237d9bad3bea2c391114588cf4131707e84d9493064d267f9"}, + {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7214477bf9bd195894cf24005b1e7b496f46833337b5dedb7b2a6e33f66d962c"}, + {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d55588cba7553f0b6ec33130bc3e114b355570b45785cebdc9daed8c637dd440"}, + {file = "regex-2024.7.24-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558a57cfc32adcf19d3f791f62b5ff564922942e389e3cfdb538a23d65a6b610"}, + {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a512eed9dfd4117110b1881ba9a59b31433caed0c4101b361f768e7bcbaf93c5"}, + {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:86b17ba823ea76256b1885652e3a141a99a5c4422f4a869189db328321b73799"}, + {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5eefee9bfe23f6df09ffb6dfb23809f4d74a78acef004aa904dc7c88b9944b05"}, + {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:731fcd76bbdbf225e2eb85b7c38da9633ad3073822f5ab32379381e8c3c12e94"}, + {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eaef80eac3b4cfbdd6de53c6e108b4c534c21ae055d1dbea2de6b3b8ff3def38"}, + {file = "regex-2024.7.24-cp312-cp312-win32.whl", hash = "sha256:185e029368d6f89f36e526764cf12bf8d6f0e3a2a7737da625a76f594bdfcbfc"}, + {file = "regex-2024.7.24-cp312-cp312-win_amd64.whl", hash = "sha256:2f1baff13cc2521bea83ab2528e7a80cbe0ebb2c6f0bfad15be7da3aed443908"}, + {file = "regex-2024.7.24-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:66b4c0731a5c81921e938dcf1a88e978264e26e6ac4ec96a4d21ae0354581ae0"}, + {file = "regex-2024.7.24-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:88ecc3afd7e776967fa16c80f974cb79399ee8dc6c96423321d6f7d4b881c92b"}, + {file = "regex-2024.7.24-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:64bd50cf16bcc54b274e20235bf8edbb64184a30e1e53873ff8d444e7ac656b2"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb462f0e346fcf41a901a126b50f8781e9a474d3927930f3490f38a6e73b6950"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a82465ebbc9b1c5c50738536fdfa7cab639a261a99b469c9d4c7dcbb2b3f1e57"}, + {file = 
"regex-2024.7.24-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:68a8f8c046c6466ac61a36b65bb2395c74451df2ffb8458492ef49900efed293"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac8e84fff5d27420f3c1e879ce9929108e873667ec87e0c8eeb413a5311adfe"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba2537ef2163db9e6ccdbeb6f6424282ae4dea43177402152c67ef869cf3978b"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:43affe33137fcd679bdae93fb25924979517e011f9dea99163f80b82eadc7e53"}, + {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:c9bb87fdf2ab2370f21e4d5636e5317775e5d51ff32ebff2cf389f71b9b13750"}, + {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:945352286a541406f99b2655c973852da7911b3f4264e010218bbc1cc73168f2"}, + {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:8bc593dcce679206b60a538c302d03c29b18e3d862609317cb560e18b66d10cf"}, + {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3f3b6ca8eae6d6c75a6cff525c8530c60e909a71a15e1b731723233331de4169"}, + {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c51edc3541e11fbe83f0c4d9412ef6c79f664a3745fab261457e84465ec9d5a8"}, + {file = "regex-2024.7.24-cp38-cp38-win32.whl", hash = "sha256:d0a07763776188b4db4c9c7fb1b8c494049f84659bb387b71c73bbc07f189e96"}, + {file = "regex-2024.7.24-cp38-cp38-win_amd64.whl", hash = "sha256:8fd5afd101dcf86a270d254364e0e8dddedebe6bd1ab9d5f732f274fa00499a5"}, + {file = "regex-2024.7.24-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0ffe3f9d430cd37d8fa5632ff6fb36d5b24818c5c986893063b4e5bdb84cdf24"}, + {file = "regex-2024.7.24-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:25419b70ba00a16abc90ee5fce061228206173231f004437730b67ac77323f0d"}, + {file = "regex-2024.7.24-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:33e2614a7ce627f0cdf2ad104797d1f68342d967de3695678c0cb84f530709f8"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d33a0021893ede5969876052796165bab6006559ab845fd7b515a30abdd990dc"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04ce29e2c5fedf296b1a1b0acc1724ba93a36fb14031f3abfb7abda2806c1535"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b16582783f44fbca6fcf46f61347340c787d7530d88b4d590a397a47583f31dd"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:836d3cc225b3e8a943d0b02633fb2f28a66e281290302a79df0e1eaa984ff7c1"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:438d9f0f4bc64e8dea78274caa5af971ceff0f8771e1a2333620969936ba10be"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:973335b1624859cb0e52f96062a28aa18f3a5fc77a96e4a3d6d76e29811a0e6e"}, + {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c5e69fd3eb0b409432b537fe3c6f44ac089c458ab6b78dcec14478422879ec5f"}, + {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fbf8c2f00904eaf63ff37718eb13acf8e178cb940520e47b2f05027f5bb34ce3"}, + 
{file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2757ace61bc4061b69af19e4689fa4416e1a04840f33b441034202b5cd02d4"}, + {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:44fc61b99035fd9b3b9453f1713234e5a7c92a04f3577252b45feefe1b327759"}, + {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:84c312cdf839e8b579f504afcd7b65f35d60b6285d892b19adea16355e8343c9"}, + {file = "regex-2024.7.24-cp39-cp39-win32.whl", hash = "sha256:ca5b2028c2f7af4e13fb9fc29b28d0ce767c38c7facdf64f6c2cd040413055f1"}, + {file = "regex-2024.7.24-cp39-cp39-win_amd64.whl", hash = "sha256:7c479f5ae937ec9985ecaf42e2e10631551d909f203e31308c12d703922742f9"}, + {file = "regex-2024.7.24.tar.gz", hash = "sha256:9cfd009eed1a46b27c14039ad5bbc5e71b6367c5b2e6d5f5da0ea91600817506"}, ] [[package]] @@ -5196,28 +5314,29 @@ pyasn1 = ">=0.1.3" [[package]] name = "ruff" -version = "0.4.10" +version = "0.6.0" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.4.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:5c2c4d0859305ac5a16310eec40e4e9a9dec5dcdfbe92697acd99624e8638dac"}, - {file = "ruff-0.4.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:a79489607d1495685cdd911a323a35871abfb7a95d4f98fc6f85e799227ac46e"}, - {file = "ruff-0.4.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1dd1681dfa90a41b8376a61af05cc4dc5ff32c8f14f5fe20dba9ff5deb80cd6"}, - {file = "ruff-0.4.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c75c53bb79d71310dc79fb69eb4902fba804a81f374bc86a9b117a8d077a1784"}, - {file = "ruff-0.4.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18238c80ee3d9100d3535d8eb15a59c4a0753b45cc55f8bf38f38d6a597b9739"}, - {file = "ruff-0.4.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:d8f71885bce242da344989cae08e263de29752f094233f932d4f5cfb4ef36a81"}, - {file = "ruff-0.4.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:330421543bd3222cdfec481e8ff3460e8702ed1e58b494cf9d9e4bf90db52b9d"}, - {file = "ruff-0.4.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9e9b6fb3a37b772628415b00c4fc892f97954275394ed611056a4b8a2631365e"}, - {file = "ruff-0.4.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f54c481b39a762d48f64d97351048e842861c6662d63ec599f67d515cb417f6"}, - {file = "ruff-0.4.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:67fe086b433b965c22de0b4259ddfe6fa541c95bf418499bedb9ad5fb8d1c631"}, - {file = "ruff-0.4.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:acfaaab59543382085f9eb51f8e87bac26bf96b164839955f244d07125a982ef"}, - {file = "ruff-0.4.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3cea07079962b2941244191569cf3a05541477286f5cafea638cd3aa94b56815"}, - {file = "ruff-0.4.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:338a64ef0748f8c3a80d7f05785930f7965d71ca260904a9321d13be24b79695"}, - {file = "ruff-0.4.10-py3-none-win32.whl", hash = "sha256:ffe3cd2f89cb54561c62e5fa20e8f182c0a444934bf430515a4b422f1ab7b7ca"}, - {file = "ruff-0.4.10-py3-none-win_amd64.whl", hash = "sha256:67f67cef43c55ffc8cc59e8e0b97e9e60b4837c8f21e8ab5ffd5d66e196e25f7"}, - {file = "ruff-0.4.10-py3-none-win_arm64.whl", hash = "sha256:dd1fcee327c20addac7916ca4e2653fbbf2e8388d8a6477ce5b4e986b68ae6c0"}, - {file = "ruff-0.4.10.tar.gz", hash = 
"sha256:3aa4f2bc388a30d346c56524f7cacca85945ba124945fe489952aadb6b5cd804"}, + {file = "ruff-0.6.0-py3-none-linux_armv6l.whl", hash = "sha256:92dcce923e5df265781e5fc76f9a1edad52201a7aafe56e586b90988d5239013"}, + {file = "ruff-0.6.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:31b90ff9dc79ed476c04e957ba7e2b95c3fceb76148f2079d0d68a908d2cfae7"}, + {file = "ruff-0.6.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:6d834a9ec9f8287dd6c3297058b3a265ed6b59233db22593379ee38ebc4b9768"}, + {file = "ruff-0.6.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2089267692696aba342179471831a085043f218706e642564812145df8b8d0d"}, + {file = "ruff-0.6.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aa62b423ee4bbd8765f2c1dbe8f6aac203e0583993a91453dc0a449d465c84da"}, + {file = "ruff-0.6.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7344e1a964b16b1137ea361d6516ce4ee61a0403fa94252a1913ecc1311adcae"}, + {file = "ruff-0.6.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:487f3a35c3f33bf82be212ce15dc6278ea854e35573a3f809442f73bec8b2760"}, + {file = "ruff-0.6.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:75db409984077a793cf344d499165298a6f65449e905747ac65983b12e3e64b1"}, + {file = "ruff-0.6.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84908bd603533ecf1db456d8fc2665d1f4335d722e84bc871d3bbd2d1116c272"}, + {file = "ruff-0.6.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f1749a0aef3ec41ed91a0e2127a6ae97d2e2853af16dbd4f3c00d7a3af726c5"}, + {file = "ruff-0.6.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:016fea751e2bcfbbd2f8cb19b97b37b3fd33148e4df45b526e87096f4e17354f"}, + {file = "ruff-0.6.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:6ae80f141b53b2e36e230017e64f5ea2def18fac14334ffceaae1b780d70c4f7"}, + {file = "ruff-0.6.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:eaaaf33ea4b3f63fd264d6a6f4a73fa224bbfda4b438ffea59a5340f4afa2bb5"}, + {file = "ruff-0.6.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:7667ddd1fc688150a7ca4137140867584c63309695a30016880caf20831503a0"}, + {file = "ruff-0.6.0-py3-none-win32.whl", hash = "sha256:ae48365aae60d40865a412356f8c6f2c0be1c928591168111eaf07eaefa6bea3"}, + {file = "ruff-0.6.0-py3-none-win_amd64.whl", hash = "sha256:774032b507c96f0c803c8237ce7d2ef3934df208a09c40fa809c2931f957fe5e"}, + {file = "ruff-0.6.0-py3-none-win_arm64.whl", hash = "sha256:a5366e8c3ae6b2dc32821749b532606c42e609a99b0ae1472cf601da931a048c"}, + {file = "ruff-0.6.0.tar.gz", hash = "sha256:272a81830f68f9bd19d49eaf7fa01a5545c5a2e86f32a9935bb0e4bb9a1db5b8"}, ] [[package]] @@ -5239,111 +5358,121 @@ crt = ["botocore[crt] (>=1.33.2,<2.0a.0)"] [[package]] name = "safetensors" -version = "0.4.3" +version = "0.4.4" description = "" optional = true python-versions = ">=3.7" files = [ - {file = "safetensors-0.4.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:dcf5705cab159ce0130cd56057f5f3425023c407e170bca60b4868048bae64fd"}, - {file = "safetensors-0.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bb4f8c5d0358a31e9a08daeebb68f5e161cdd4018855426d3f0c23bb51087055"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70a5319ef409e7f88686a46607cbc3c428271069d8b770076feaf913664a07ac"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:fb9c65bd82f9ef3ce4970dc19ee86be5f6f93d032159acf35e663c6bea02b237"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:edb5698a7bc282089f64c96c477846950358a46ede85a1c040e0230344fdde10"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:efcc860be094b8d19ac61b452ec635c7acb9afa77beb218b1d7784c6d41fe8ad"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d88b33980222085dd6001ae2cad87c6068e0991d4f5ccf44975d216db3b57376"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5fc6775529fb9f0ce2266edd3e5d3f10aab068e49f765e11f6f2a63b5367021d"}, - {file = "safetensors-0.4.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9c6ad011c1b4e3acff058d6b090f1da8e55a332fbf84695cf3100c649cc452d1"}, - {file = "safetensors-0.4.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8c496c5401c1b9c46d41a7688e8ff5b0310a3b9bae31ce0f0ae870e1ea2b8caf"}, - {file = "safetensors-0.4.3-cp310-none-win32.whl", hash = "sha256:38e2a8666178224a51cca61d3cb4c88704f696eac8f72a49a598a93bbd8a4af9"}, - {file = "safetensors-0.4.3-cp310-none-win_amd64.whl", hash = "sha256:393e6e391467d1b2b829c77e47d726f3b9b93630e6a045b1d1fca67dc78bf632"}, - {file = "safetensors-0.4.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:22f3b5d65e440cec0de8edaa672efa888030802e11c09b3d6203bff60ebff05a"}, - {file = "safetensors-0.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c4fa560ebd4522adddb71dcd25d09bf211b5634003f015a4b815b7647d62ebe"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9afd5358719f1b2cf425fad638fc3c887997d6782da317096877e5b15b2ce93"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d8c5093206ef4b198600ae484230402af6713dab1bd5b8e231905d754022bec7"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0b2104df1579d6ba9052c0ae0e3137c9698b2d85b0645507e6fd1813b70931a"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8cf18888606dad030455d18f6c381720e57fc6a4170ee1966adb7ebc98d4d6a3"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0bf4f9d6323d9f86eef5567eabd88f070691cf031d4c0df27a40d3b4aaee755b"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:585c9ae13a205807b63bef8a37994f30c917ff800ab8a1ca9c9b5d73024f97ee"}, - {file = "safetensors-0.4.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faefeb3b81bdfb4e5a55b9bbdf3d8d8753f65506e1d67d03f5c851a6c87150e9"}, - {file = "safetensors-0.4.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:befdf0167ad626f22f6aac6163477fcefa342224a22f11fdd05abb3995c1783c"}, - {file = "safetensors-0.4.3-cp311-none-win32.whl", hash = "sha256:a7cef55929dcbef24af3eb40bedec35d82c3c2fa46338bb13ecf3c5720af8a61"}, - {file = "safetensors-0.4.3-cp311-none-win_amd64.whl", hash = "sha256:840b7ac0eff5633e1d053cc9db12fdf56b566e9403b4950b2dc85393d9b88d67"}, - {file = "safetensors-0.4.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:22d21760dc6ebae42e9c058d75aa9907d9f35e38f896e3c69ba0e7b213033856"}, - {file = "safetensors-0.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d22c1a10dff3f64d0d68abb8298a3fd88ccff79f408a3e15b3e7f637ef5c980"}, - {file = 
"safetensors-0.4.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1648568667f820b8c48317c7006221dc40aced1869908c187f493838a1362bc"}, - {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:446e9fe52c051aeab12aac63d1017e0f68a02a92a027b901c4f8e931b24e5397"}, - {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fef5d70683643618244a4f5221053567ca3e77c2531e42ad48ae05fae909f542"}, - {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a1f4430cc0c9d6afa01214a4b3919d0a029637df8e09675ceef1ca3f0dfa0df"}, - {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d603846a8585b9432a0fd415db1d4c57c0f860eb4aea21f92559ff9902bae4d"}, - {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a844cdb5d7cbc22f5f16c7e2a0271170750763c4db08381b7f696dbd2c78a361"}, - {file = "safetensors-0.4.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:88887f69f7a00cf02b954cdc3034ffb383b2303bc0ab481d4716e2da51ddc10e"}, - {file = "safetensors-0.4.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ee463219d9ec6c2be1d331ab13a8e0cd50d2f32240a81d498266d77d07b7e71e"}, - {file = "safetensors-0.4.3-cp312-none-win32.whl", hash = "sha256:d0dd4a1db09db2dba0f94d15addc7e7cd3a7b0d393aa4c7518c39ae7374623c3"}, - {file = "safetensors-0.4.3-cp312-none-win_amd64.whl", hash = "sha256:d14d30c25897b2bf19b6fb5ff7e26cc40006ad53fd4a88244fdf26517d852dd7"}, - {file = "safetensors-0.4.3-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:d1456f814655b224d4bf6e7915c51ce74e389b413be791203092b7ff78c936dd"}, - {file = "safetensors-0.4.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:455d538aa1aae4a8b279344a08136d3f16334247907b18a5c3c7fa88ef0d3c46"}, - {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf476bca34e1340ee3294ef13e2c625833f83d096cfdf69a5342475602004f95"}, - {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:02ef3a24face643456020536591fbd3c717c5abaa2737ec428ccbbc86dffa7a4"}, - {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7de32d0d34b6623bb56ca278f90db081f85fb9c5d327e3c18fd23ac64f465768"}, - {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a0deb16a1d3ea90c244ceb42d2c6c276059616be21a19ac7101aa97da448faf"}, - {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c59d51f182c729f47e841510b70b967b0752039f79f1de23bcdd86462a9b09ee"}, - {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1f598b713cc1a4eb31d3b3203557ac308acf21c8f41104cdd74bf640c6e538e3"}, - {file = "safetensors-0.4.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5757e4688f20df083e233b47de43845d1adb7e17b6cf7da5f8444416fc53828d"}, - {file = "safetensors-0.4.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:fe746d03ed8d193674a26105e4f0fe6c726f5bb602ffc695b409eaf02f04763d"}, - {file = "safetensors-0.4.3-cp37-none-win32.whl", hash = "sha256:0d5ffc6a80f715c30af253e0e288ad1cd97a3d0086c9c87995e5093ebc075e50"}, - {file = "safetensors-0.4.3-cp37-none-win_amd64.whl", hash = "sha256:a11c374eb63a9c16c5ed146457241182f310902bd2a9c18255781bb832b6748b"}, - {file = 
"safetensors-0.4.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1e31be7945f66be23f4ec1682bb47faa3df34cb89fc68527de6554d3c4258a4"}, - {file = "safetensors-0.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:03a4447c784917c9bf01d8f2ac5080bc15c41692202cd5f406afba16629e84d6"}, - {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d244bcafeb1bc06d47cfee71727e775bca88a8efda77a13e7306aae3813fa7e4"}, - {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53c4879b9c6bd7cd25d114ee0ef95420e2812e676314300624594940a8d6a91f"}, - {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:74707624b81f1b7f2b93f5619d4a9f00934d5948005a03f2c1845ffbfff42212"}, - {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d52c958dc210265157573f81d34adf54e255bc2b59ded6218500c9b15a750eb"}, - {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f9568f380f513a60139971169c4a358b8731509cc19112369902eddb33faa4d"}, - {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0d9cd8e1560dfc514b6d7859247dc6a86ad2f83151a62c577428d5102d872721"}, - {file = "safetensors-0.4.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:89f9f17b0dacb913ed87d57afbc8aad85ea42c1085bd5de2f20d83d13e9fc4b2"}, - {file = "safetensors-0.4.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1139eb436fd201c133d03c81209d39ac57e129f5e74e34bb9ab60f8d9b726270"}, - {file = "safetensors-0.4.3-cp38-none-win32.whl", hash = "sha256:d9c289f140a9ae4853fc2236a2ffc9a9f2d5eae0cb673167e0f1b8c18c0961ac"}, - {file = "safetensors-0.4.3-cp38-none-win_amd64.whl", hash = "sha256:622afd28968ef3e9786562d352659a37de4481a4070f4ebac883f98c5836563e"}, - {file = "safetensors-0.4.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8651c7299cbd8b4161a36cd6a322fa07d39cd23535b144d02f1c1972d0c62f3c"}, - {file = "safetensors-0.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e375d975159ac534c7161269de24ddcd490df2157b55c1a6eeace6cbb56903f0"}, - {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:084fc436e317f83f7071fc6a62ca1c513b2103db325cd09952914b50f51cf78f"}, - {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:41a727a7f5e6ad9f1db6951adee21bbdadc632363d79dc434876369a17de6ad6"}, - {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7dbbde64b6c534548696808a0e01276d28ea5773bc9a2dfb97a88cd3dffe3df"}, - {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bbae3b4b9d997971431c346edbfe6e41e98424a097860ee872721e176040a893"}, - {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01e4b22e3284cd866edeabe4f4d896229495da457229408d2e1e4810c5187121"}, - {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dd37306546b58d3043eb044c8103a02792cc024b51d1dd16bd3dd1f334cb3ed"}, - {file = "safetensors-0.4.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d8815b5e1dac85fc534a97fd339e12404db557878c090f90442247e87c8aeaea"}, - {file = "safetensors-0.4.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e011cc162503c19f4b1fd63dfcddf73739c7a243a17dac09b78e57a00983ab35"}, - {file = "safetensors-0.4.3-cp39-none-win32.whl", hash = 
"sha256:01feb3089e5932d7e662eda77c3ecc389f97c0883c4a12b5cfdc32b589a811c3"}, - {file = "safetensors-0.4.3-cp39-none-win_amd64.whl", hash = "sha256:3f9cdca09052f585e62328c1c2923c70f46814715c795be65f0b93f57ec98a02"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1b89381517891a7bb7d1405d828b2bf5d75528299f8231e9346b8eba092227f9"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:cd6fff9e56df398abc5866b19a32124815b656613c1c5ec0f9350906fd798aac"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:840caf38d86aa7014fe37ade5d0d84e23dcfbc798b8078015831996ecbc206a3"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9650713b2cfa9537a2baf7dd9fee458b24a0aaaa6cafcea8bdd5fb2b8efdc34"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e4119532cd10dba04b423e0f86aecb96cfa5a602238c0aa012f70c3a40c44b50"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e066e8861eef6387b7c772344d1fe1f9a72800e04ee9a54239d460c400c72aab"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:90964917f5b0fa0fa07e9a051fbef100250c04d150b7026ccbf87a34a54012e0"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c41e1893d1206aa7054029681778d9a58b3529d4c807002c156d58426c225173"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae7613a119a71a497d012ccc83775c308b9c1dab454806291427f84397d852fd"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9bac020faba7f5dc481e881b14b6425265feabb5bfc552551d21189c0eddc3"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:420a98f593ff9930f5822560d14c395ccbc57342ddff3b463bc0b3d6b1951550"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f5e6883af9a68c0028f70a4c19d5a6ab6238a379be36ad300a22318316c00cb0"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:cdd0a3b5da66e7f377474599814dbf5cbf135ff059cc73694de129b58a5e8a2c"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9bfb92f82574d9e58401d79c70c716985dc049b635fef6eecbb024c79b2c46ad"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3615a96dd2dcc30eb66d82bc76cda2565f4f7bfa89fcb0e31ba3cea8a1a9ecbb"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:868ad1b6fc41209ab6bd12f63923e8baeb1a086814cb2e81a65ed3d497e0cf8f"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7ffba80aa49bd09195145a7fd233a7781173b422eeb995096f2b30591639517"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0acbe31340ab150423347e5b9cc595867d814244ac14218932a5cf1dd38eb39"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19bbdf95de2cf64f25cd614c5236c8b06eb2cfa47cbf64311f4b5d80224623a3"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b852e47eb08475c2c1bd8131207b405793bfc20d6f45aff893d3baaad449ed14"}, - {file = 
"safetensors-0.4.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5d07cbca5b99babb692d76d8151bec46f461f8ad8daafbfd96b2fca40cadae65"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1ab6527a20586d94291c96e00a668fa03f86189b8a9defa2cdd34a1a01acc7d5"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02318f01e332cc23ffb4f6716e05a492c5f18b1d13e343c49265149396284a44"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec4b52ce9a396260eb9731eb6aea41a7320de22ed73a1042c2230af0212758ce"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:018b691383026a2436a22b648873ed11444a364324e7088b99cd2503dd828400"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:309b10dbcab63269ecbf0e2ca10ce59223bb756ca5d431ce9c9eeabd446569da"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b277482120df46e27a58082df06a15aebda4481e30a1c21eefd0921ae7e03f65"}, - {file = "safetensors-0.4.3.tar.gz", hash = "sha256:2f85fc50c4e07a21e95c24e07460fe6f7e2859d0ce88092838352b798ce711c2"}, + {file = "safetensors-0.4.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2adb497ada13097f30e386e88c959c0fda855a5f6f98845710f5bb2c57e14f12"}, + {file = "safetensors-0.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7db7fdc2d71fd1444d85ca3f3d682ba2df7d61a637dfc6d80793f439eae264ab"}, + {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d4f0eed76b430f009fbefca1a0028ddb112891b03cb556d7440d5cd68eb89a9"}, + {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:57d216fab0b5c432aabf7170883d7c11671622bde8bd1436c46d633163a703f6"}, + {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7d9b76322e49c056bcc819f8bdca37a2daa5a6d42c07f30927b501088db03309"}, + {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:32f0d1f6243e90ee43bc6ee3e8c30ac5b09ca63f5dd35dbc985a1fc5208c451a"}, + {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44d464bdc384874601a177375028012a5f177f1505279f9456fea84bbc575c7f"}, + {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:63144e36209ad8e4e65384dbf2d52dd5b1866986079c00a72335402a38aacdc5"}, + {file = "safetensors-0.4.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:051d5ecd490af7245258000304b812825974d5e56f14a3ff7e1b8b2ba6dc2ed4"}, + {file = "safetensors-0.4.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:51bc8429d9376224cd3cf7e8ce4f208b4c930cd10e515b6ac6a72cbc3370f0d9"}, + {file = "safetensors-0.4.4-cp310-none-win32.whl", hash = "sha256:fb7b54830cee8cf9923d969e2df87ce20e625b1af2fd194222ab902d3adcc29c"}, + {file = "safetensors-0.4.4-cp310-none-win_amd64.whl", hash = "sha256:4b3e8aa8226d6560de8c2b9d5ff8555ea482599c670610758afdc97f3e021e9c"}, + {file = "safetensors-0.4.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:bbaa31f2cb49013818bde319232ccd72da62ee40f7d2aa532083eda5664e85ff"}, + {file = "safetensors-0.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9fdcb80f4e9fbb33b58e9bf95e7dbbedff505d1bcd1c05f7c7ce883632710006"}, + {file = 
"safetensors-0.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55c14c20be247b8a1aeaf3ab4476265e3ca83096bb8e09bb1a7aa806088def4f"}, + {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:949aaa1118660f992dbf0968487b3e3cfdad67f948658ab08c6b5762e90cc8b6"}, + {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c11a4ab7debc456326a2bac67f35ee0ac792bcf812c7562a4a28559a5c795e27"}, + {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0cea44bba5c5601b297bc8307e4075535b95163402e4906b2e9b82788a2a6df"}, + {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9d752c97f6bbe327352f76e5b86442d776abc789249fc5e72eacb49e6916482"}, + {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03f2bb92e61b055ef6cc22883ad1ae898010a95730fa988c60a23800eb742c2c"}, + {file = "safetensors-0.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:87bf3f91a9328a941acc44eceffd4e1f5f89b030985b2966637e582157173b98"}, + {file = "safetensors-0.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:20d218ec2b6899d29d6895419a58b6e44cc5ff8f0cc29fac8d236a8978ab702e"}, + {file = "safetensors-0.4.4-cp311-none-win32.whl", hash = "sha256:8079486118919f600c603536e2490ca37b3dbd3280e3ad6eaacfe6264605ac8a"}, + {file = "safetensors-0.4.4-cp311-none-win_amd64.whl", hash = "sha256:2f8c2eb0615e2e64ee27d478c7c13f51e5329d7972d9e15528d3e4cfc4a08f0d"}, + {file = "safetensors-0.4.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:baec5675944b4a47749c93c01c73d826ef7d42d36ba8d0dba36336fa80c76426"}, + {file = "safetensors-0.4.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f15117b96866401825f3e94543145028a2947d19974429246ce59403f49e77c6"}, + {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a13a9caea485df164c51be4eb0c87f97f790b7c3213d635eba2314d959fe929"}, + {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6b54bc4ca5f9b9bba8cd4fb91c24b2446a86b5ae7f8975cf3b7a277353c3127c"}, + {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:08332c22e03b651c8eb7bf5fc2de90044f3672f43403b3d9ac7e7e0f4f76495e"}, + {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bb62841e839ee992c37bb75e75891c7f4904e772db3691c59daaca5b4ab960e1"}, + {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e5b927acc5f2f59547270b0309a46d983edc44be64e1ca27a7fcb0474d6cd67"}, + {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2a69c71b1ae98a8021a09a0b43363b0143b0ce74e7c0e83cacba691b62655fb8"}, + {file = "safetensors-0.4.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23654ad162c02a5636f0cd520a0310902c4421aab1d91a0b667722a4937cc445"}, + {file = "safetensors-0.4.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0677c109d949cf53756859160b955b2e75b0eefe952189c184d7be30ecf7e858"}, + {file = "safetensors-0.4.4-cp312-none-win32.whl", hash = "sha256:a51d0ddd4deb8871c6de15a772ef40b3dbd26a3c0451bb9e66bc76fc5a784e5b"}, + {file = "safetensors-0.4.4-cp312-none-win_amd64.whl", hash = "sha256:2d065059e75a798bc1933c293b68d04d79b586bb7f8c921e0ca1e82759d0dbb1"}, + {file = 
"safetensors-0.4.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:9d625692578dd40a112df30c02a1adf068027566abd8e6a74893bb13d441c150"}, + {file = "safetensors-0.4.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7cabcf39c81e5b988d0adefdaea2eb9b4fd9bd62d5ed6559988c62f36bfa9a89"}, + {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8359bef65f49d51476e9811d59c015f0ddae618ee0e44144f5595278c9f8268c"}, + {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1a32c662e7df9226fd850f054a3ead0e4213a96a70b5ce37b2d26ba27004e013"}, + {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c329a4dcc395364a1c0d2d1574d725fe81a840783dda64c31c5a60fc7d41472c"}, + {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:239ee093b1db877c9f8fe2d71331a97f3b9c7c0d3ab9f09c4851004a11f44b65"}, + {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd574145d930cf9405a64f9923600879a5ce51d9f315443a5f706374841327b6"}, + {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f6784eed29f9e036acb0b7769d9e78a0dc2c72c2d8ba7903005350d817e287a4"}, + {file = "safetensors-0.4.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:65a4a6072436bf0a4825b1c295d248cc17e5f4651e60ee62427a5bcaa8622a7a"}, + {file = "safetensors-0.4.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:df81e3407630de060ae8313da49509c3caa33b1a9415562284eaf3d0c7705f9f"}, + {file = "safetensors-0.4.4-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:e4a0f374200e8443d9746e947ebb346c40f83a3970e75a685ade0adbba5c48d9"}, + {file = "safetensors-0.4.4-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:181fb5f3dee78dae7fd7ec57d02e58f7936498d587c6b7c1c8049ef448c8d285"}, + {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb4ac1d8f6b65ec84ddfacd275079e89d9df7c92f95675ba96c4f790a64df6e"}, + {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:76897944cd9239e8a70955679b531b9a0619f76e25476e57ed373322d9c2075d"}, + {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a9e9d1a27e51a0f69e761a3d581c3af46729ec1c988fa1f839e04743026ae35"}, + {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:005ef9fc0f47cb9821c40793eb029f712e97278dae84de91cb2b4809b856685d"}, + {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26987dac3752688c696c77c3576f951dbbdb8c57f0957a41fb6f933cf84c0b62"}, + {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c05270b290acd8d249739f40d272a64dd597d5a4b90f27d830e538bc2549303c"}, + {file = "safetensors-0.4.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:068d3a33711fc4d93659c825a04480ff5a3854e1d78632cdc8f37fee917e8a60"}, + {file = "safetensors-0.4.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:063421ef08ca1021feea8b46951251b90ae91f899234dd78297cbe7c1db73b99"}, + {file = "safetensors-0.4.4-cp37-none-win32.whl", hash = "sha256:d52f5d0615ea83fd853d4e1d8acf93cc2e0223ad4568ba1e1f6ca72e94ea7b9d"}, + {file = "safetensors-0.4.4-cp37-none-win_amd64.whl", hash = "sha256:88a5ac3280232d4ed8e994cbc03b46a1807ce0aa123867b40c4a41f226c61f94"}, + {file = 
"safetensors-0.4.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:3467ab511bfe3360967d7dc53b49f272d59309e57a067dd2405b4d35e7dcf9dc"}, + {file = "safetensors-0.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2ab4c96d922e53670ce25fbb9b63d5ea972e244de4fa1dd97b590d9fd66aacef"}, + {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87df18fce4440477c3ef1fd7ae17c704a69a74a77e705a12be135ee0651a0c2d"}, + {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e5fe345b2bc7d88587149ac11def1f629d2671c4c34f5df38aed0ba59dc37f8"}, + {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9f1a3e01dce3cd54060791e7e24588417c98b941baa5974700eeb0b8eb65b0a0"}, + {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c6bf35e9a8998d8339fd9a05ac4ce465a4d2a2956cc0d837b67c4642ed9e947"}, + {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:166c0c52f6488b8538b2a9f3fbc6aad61a7261e170698779b371e81b45f0440d"}, + {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:87e9903b8668a16ef02c08ba4ebc91e57a49c481e9b5866e31d798632805014b"}, + {file = "safetensors-0.4.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a9c421153aa23c323bd8483d4155b4eee82c9a50ac11cccd83539104a8279c64"}, + {file = "safetensors-0.4.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a4b8617499b2371c7353302c5116a7e0a3a12da66389ce53140e607d3bf7b3d3"}, + {file = "safetensors-0.4.4-cp38-none-win32.whl", hash = "sha256:c6280f5aeafa1731f0a3709463ab33d8e0624321593951aefada5472f0b313fd"}, + {file = "safetensors-0.4.4-cp38-none-win_amd64.whl", hash = "sha256:6ceed6247fc2d33b2a7b7d25d8a0fe645b68798856e0bc7a9800c5fd945eb80f"}, + {file = "safetensors-0.4.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5cf6c6f6193797372adf50c91d0171743d16299491c75acad8650107dffa9269"}, + {file = "safetensors-0.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:419010156b914a3e5da4e4adf992bee050924d0fe423c4b329e523e2c14c3547"}, + {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88f6fd5a5c1302ce79993cc5feeadcc795a70f953c762544d01fb02b2db4ea33"}, + {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d468cffb82d90789696d5b4d8b6ab8843052cba58a15296691a7a3df55143cd2"}, + {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9353c2af2dd467333d4850a16edb66855e795561cd170685178f706c80d2c71e"}, + {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:83c155b4a33368d9b9c2543e78f2452090fb030c52401ca608ef16fa58c98353"}, + {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9850754c434e636ce3dc586f534bb23bcbd78940c304775bee9005bf610e98f1"}, + {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:275f500b4d26f67b6ec05629a4600645231bd75e4ed42087a7c1801bff04f4b3"}, + {file = "safetensors-0.4.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5c2308de665b7130cd0e40a2329278226e4cf083f7400c51ca7e19ccfb3886f3"}, + {file = "safetensors-0.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e06a9ebc8656e030ccfe44634f2a541b4b1801cd52e390a53ad8bacbd65f8518"}, + {file = "safetensors-0.4.4-cp39-none-win32.whl", hash = 
"sha256:ef73df487b7c14b477016947c92708c2d929e1dee2bacdd6fff5a82ed4539537"}, + {file = "safetensors-0.4.4-cp39-none-win_amd64.whl", hash = "sha256:83d054818a8d1198d8bd8bc3ea2aac112a2c19def2bf73758321976788706398"}, + {file = "safetensors-0.4.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1d1f34c71371f0e034004a0b583284b45d233dd0b5f64a9125e16b8a01d15067"}, + {file = "safetensors-0.4.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1a8043a33d58bc9b30dfac90f75712134ca34733ec3d8267b1bd682afe7194f5"}, + {file = "safetensors-0.4.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8db8f0c59c84792c12661f8efa85de160f80efe16b87a9d5de91b93f9e0bce3c"}, + {file = "safetensors-0.4.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cfc1fc38e37630dd12d519bdec9dcd4b345aec9930bb9ce0ed04461f49e58b52"}, + {file = "safetensors-0.4.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e5c9d86d9b13b18aafa88303e2cd21e677f5da2a14c828d2c460fe513af2e9a5"}, + {file = "safetensors-0.4.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:43251d7f29a59120a26f5a0d9583b9e112999e500afabcfdcb91606d3c5c89e3"}, + {file = "safetensors-0.4.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:2c42e9b277513b81cf507e6121c7b432b3235f980cac04f39f435b7902857f91"}, + {file = "safetensors-0.4.4-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3daacc9a4e3f428a84dd56bf31f20b768eb0b204af891ed68e1f06db9edf546f"}, + {file = "safetensors-0.4.4-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:218bbb9b883596715fc9997bb42470bf9f21bb832c3b34c2bf744d6fa8f2bbba"}, + {file = "safetensors-0.4.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bd5efc26b39f7fc82d4ab1d86a7f0644c8e34f3699c33f85bfa9a717a030e1b"}, + {file = "safetensors-0.4.4-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:56ad9776b65d8743f86698a1973292c966cf3abff627efc44ed60e66cc538ddd"}, + {file = "safetensors-0.4.4-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:30f23e6253c5f43a809dea02dc28a9f5fa747735dc819f10c073fe1b605e97d4"}, + {file = "safetensors-0.4.4-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:5512078d00263de6cb04e9d26c9ae17611098f52357fea856213e38dc462f81f"}, + {file = "safetensors-0.4.4-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b96c3d9266439d17f35fc2173111d93afc1162f168e95aed122c1ca517b1f8f1"}, + {file = "safetensors-0.4.4-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:08d464aa72a9a13826946b4fb9094bb4b16554bbea2e069e20bd903289b6ced9"}, + {file = "safetensors-0.4.4-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:210160816d5a36cf41f48f38473b6f70d7bcb4b0527bedf0889cc0b4c3bb07db"}, + {file = "safetensors-0.4.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb276a53717f2bcfb6df0bcf284d8a12069002508d4c1ca715799226024ccd45"}, + {file = "safetensors-0.4.4-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a2c28c6487f17d8db0089e8b2cdc13de859366b94cc6cdc50e1b0a4147b56551"}, + {file = "safetensors-0.4.4-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7915f0c60e4e6e65d90f136d85dd3b429ae9191c36b380e626064694563dbd9f"}, + {file = "safetensors-0.4.4-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:00eea99ae422fbfa0b46065acbc58b46bfafadfcec179d4b4a32d5c45006af6c"}, + {file = 
"safetensors-0.4.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bb1ed4fcb0b3c2f3ea2c5767434622fe5d660e5752f21ac2e8d737b1e5e480bb"}, + {file = "safetensors-0.4.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:73fc9a0a4343188bdb421783e600bfaf81d0793cd4cce6bafb3c2ed567a74cd5"}, + {file = "safetensors-0.4.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c37e6b714200824c73ca6eaf007382de76f39466a46e97558b8dc4cf643cfbf"}, + {file = "safetensors-0.4.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f75698c5c5c542417ac4956acfc420f7d4a2396adca63a015fd66641ea751759"}, + {file = "safetensors-0.4.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca1a209157f242eb183e209040097118472e169f2e069bfbd40c303e24866543"}, + {file = "safetensors-0.4.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:177f2b60a058f92a3cec7a1786c9106c29eca8987ecdfb79ee88126e5f47fa31"}, + {file = "safetensors-0.4.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ee9622e84fe6e4cd4f020e5fda70d6206feff3157731df7151d457fdae18e541"}, + {file = "safetensors-0.4.4.tar.gz", hash = "sha256:5fe3e9b705250d0172ed4e100a811543108653fb2b66b9e702a088ad03772a07"}, ] [package.extras] @@ -5459,125 +5588,137 @@ files = [ [[package]] name = "setuptools" -version = "71.1.0" +version = "72.2.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = true python-versions = ">=3.8" files = [ - {file = "setuptools-71.1.0-py3-none-any.whl", hash = "sha256:33874fdc59b3188304b2e7c80d9029097ea31627180896fb549c578ceb8a0855"}, - {file = "setuptools-71.1.0.tar.gz", hash = "sha256:032d42ee9fb536e33087fb66cac5f840eb9391ed05637b3f2a76a7c8fb477936"}, + {file = "setuptools-72.2.0-py3-none-any.whl", hash = "sha256:f11dd94b7bae3a156a95ec151f24e4637fb4fa19c878e4d191bfb8b2d82728c4"}, + {file = "setuptools-72.2.0.tar.gz", hash = "sha256:80aacbf633704e9c8bfa1d99fa5dd4dc59573efcf9e4042c13d3bcef91ac2ef9"}, ] [package.extras] core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "ordered-set (>=3.1.1)", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] -doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.11.*)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (<0.4)", "pytest-ruff (>=0.2.1)", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] 
[[package]] name = "simplejson" -version = "3.19.2" +version = "3.19.3" description = "Simple, fast, extensible JSON encoder/decoder for Python" optional = true -python-versions = ">=2.5, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "simplejson-3.19.2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:3471e95110dcaf901db16063b2e40fb394f8a9e99b3fe9ee3acc6f6ef72183a2"}, - {file = "simplejson-3.19.2-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:3194cd0d2c959062b94094c0a9f8780ffd38417a5322450a0db0ca1a23e7fbd2"}, - {file = "simplejson-3.19.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:8a390e56a7963e3946ff2049ee1eb218380e87c8a0e7608f7f8790ba19390867"}, - {file = "simplejson-3.19.2-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1537b3dd62d8aae644f3518c407aa8469e3fd0f179cdf86c5992792713ed717a"}, - {file = "simplejson-3.19.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a8617625369d2d03766413bff9e64310feafc9fc4f0ad2b902136f1a5cd8c6b0"}, - {file = "simplejson-3.19.2-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:2c433a412e96afb9a3ce36fa96c8e61a757af53e9c9192c97392f72871e18e69"}, - {file = "simplejson-3.19.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:f1c70249b15e4ce1a7d5340c97670a95f305ca79f376887759b43bb33288c973"}, - {file = "simplejson-3.19.2-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:287e39ba24e141b046812c880f4619d0ca9e617235d74abc27267194fc0c7835"}, - {file = "simplejson-3.19.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:6f0a0b41dd05eefab547576bed0cf066595f3b20b083956b1405a6f17d1be6ad"}, - {file = "simplejson-3.19.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2f98d918f7f3aaf4b91f2b08c0c92b1774aea113334f7cde4fe40e777114dbe6"}, - {file = "simplejson-3.19.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7d74beca677623481810c7052926365d5f07393c72cbf62d6cce29991b676402"}, - {file = "simplejson-3.19.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7f2398361508c560d0bf1773af19e9fe644e218f2a814a02210ac2c97ad70db0"}, - {file = "simplejson-3.19.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ad331349b0b9ca6da86064a3599c425c7a21cd41616e175ddba0866da32df48"}, - {file = "simplejson-3.19.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:332c848f02d71a649272b3f1feccacb7e4f7e6de4a2e6dc70a32645326f3d428"}, - {file = "simplejson-3.19.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25785d038281cd106c0d91a68b9930049b6464288cea59ba95b35ee37c2d23a5"}, - {file = "simplejson-3.19.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18955c1da6fc39d957adfa346f75226246b6569e096ac9e40f67d102278c3bcb"}, - {file = "simplejson-3.19.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:11cc3afd8160d44582543838b7e4f9aa5e97865322844b75d51bf4e0e413bb3e"}, - {file = "simplejson-3.19.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b01fda3e95d07a6148702a641e5e293b6da7863f8bc9b967f62db9461330562c"}, - {file = "simplejson-3.19.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:778331444917108fa8441f59af45886270d33ce8a23bfc4f9b192c0b2ecef1b3"}, - {file = "simplejson-3.19.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9eb117db8d7ed733a7317c4215c35993b815bf6aeab67523f1f11e108c040672"}, - {file = "simplejson-3.19.2-cp310-cp310-win32.whl", hash = "sha256:39b6d79f5cbfa3eb63a869639cfacf7c41d753c64f7801efc72692c1b2637ac7"}, - {file = 
"simplejson-3.19.2-cp310-cp310-win_amd64.whl", hash = "sha256:5675e9d8eeef0aa06093c1ff898413ade042d73dc920a03e8cea2fb68f62445a"}, - {file = "simplejson-3.19.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ed628c1431100b0b65387419551e822987396bee3c088a15d68446d92f554e0c"}, - {file = "simplejson-3.19.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:adcb3332979cbc941b8fff07181f06d2b608625edc0a4d8bc3ffc0be414ad0c4"}, - {file = "simplejson-3.19.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:08889f2f597ae965284d7b52a5c3928653a9406d88c93e3161180f0abc2433ba"}, - {file = "simplejson-3.19.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef7938a78447174e2616be223f496ddccdbf7854f7bf2ce716dbccd958cc7d13"}, - {file = "simplejson-3.19.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a970a2e6d5281d56cacf3dc82081c95c1f4da5a559e52469287457811db6a79b"}, - {file = "simplejson-3.19.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:554313db34d63eac3b3f42986aa9efddd1a481169c12b7be1e7512edebff8eaf"}, - {file = "simplejson-3.19.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d36081c0b1c12ea0ed62c202046dca11438bee48dd5240b7c8de8da62c620e9"}, - {file = "simplejson-3.19.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a3cd18e03b0ee54ea4319cdcce48357719ea487b53f92a469ba8ca8e39df285e"}, - {file = "simplejson-3.19.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:66e5dc13bfb17cd6ee764fc96ccafd6e405daa846a42baab81f4c60e15650414"}, - {file = "simplejson-3.19.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:972a7833d4a1fcf7a711c939e315721a88b988553fc770a5b6a5a64bd6ebeba3"}, - {file = "simplejson-3.19.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3e74355cb47e0cd399ead3477e29e2f50e1540952c22fb3504dda0184fc9819f"}, - {file = "simplejson-3.19.2-cp311-cp311-win32.whl", hash = "sha256:1dd4f692304854352c3e396e9b5f0a9c9e666868dd0bdc784e2ac4c93092d87b"}, - {file = "simplejson-3.19.2-cp311-cp311-win_amd64.whl", hash = "sha256:9300aee2a8b5992d0f4293d88deb59c218989833e3396c824b69ba330d04a589"}, - {file = "simplejson-3.19.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b8d940fd28eb34a7084877747a60873956893e377f15a32ad445fe66c972c3b8"}, - {file = "simplejson-3.19.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4969d974d9db826a2c07671273e6b27bc48e940738d768fa8f33b577f0978378"}, - {file = "simplejson-3.19.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c594642d6b13d225e10df5c16ee15b3398e21a35ecd6aee824f107a625690374"}, - {file = "simplejson-3.19.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2f5a398b5e77bb01b23d92872255e1bcb3c0c719a3be40b8df146570fe7781a"}, - {file = "simplejson-3.19.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:176a1b524a3bd3314ed47029a86d02d5a95cc0bee15bd3063a1e1ec62b947de6"}, - {file = "simplejson-3.19.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3c7363a8cb8c5238878ec96c5eb0fc5ca2cb11fc0c7d2379863d342c6ee367a"}, - {file = "simplejson-3.19.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:346820ae96aa90c7d52653539a57766f10f33dd4be609206c001432b59ddf89f"}, - {file = "simplejson-3.19.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:de9a2792612ec6def556d1dc621fd6b2073aff015d64fba9f3e53349ad292734"}, - {file = "simplejson-3.19.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:1c768e7584c45094dca4b334af361e43b0aaa4844c04945ac7d43379eeda9bc2"}, - {file = "simplejson-3.19.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:9652e59c022e62a5b58a6f9948b104e5bb96d3b06940c6482588176f40f4914b"}, - {file = "simplejson-3.19.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9c1a4393242e321e344213a90a1e3bf35d2f624aa8b8f6174d43e3c6b0e8f6eb"}, - {file = "simplejson-3.19.2-cp312-cp312-win32.whl", hash = "sha256:7cb98be113911cb0ad09e5523d0e2a926c09a465c9abb0784c9269efe4f95917"}, - {file = "simplejson-3.19.2-cp312-cp312-win_amd64.whl", hash = "sha256:6779105d2fcb7fcf794a6a2a233787f6bbd4731227333a072d8513b252ed374f"}, - {file = "simplejson-3.19.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:061e81ea2d62671fa9dea2c2bfbc1eec2617ae7651e366c7b4a2baf0a8c72cae"}, - {file = "simplejson-3.19.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4280e460e51f86ad76dc456acdbfa9513bdf329556ffc8c49e0200878ca57816"}, - {file = "simplejson-3.19.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11c39fbc4280d7420684494373b7c5904fa72a2b48ef543a56c2d412999c9e5d"}, - {file = "simplejson-3.19.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bccb3e88ec26ffa90f72229f983d3a5d1155e41a1171190fa723d4135523585b"}, - {file = "simplejson-3.19.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bb5b50dc6dd671eb46a605a3e2eb98deb4a9af787a08fcdddabe5d824bb9664"}, - {file = "simplejson-3.19.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:d94245caa3c61f760c4ce4953cfa76e7739b6f2cbfc94cc46fff6c050c2390c5"}, - {file = "simplejson-3.19.2-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d0e5ffc763678d48ecc8da836f2ae2dd1b6eb2d27a48671066f91694e575173c"}, - {file = "simplejson-3.19.2-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:d222a9ed082cd9f38b58923775152003765016342a12f08f8c123bf893461f28"}, - {file = "simplejson-3.19.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:8434dcdd347459f9fd9c526117c01fe7ca7b016b6008dddc3c13471098f4f0dc"}, - {file = "simplejson-3.19.2-cp36-cp36m-win32.whl", hash = "sha256:c9ac1c2678abf9270e7228133e5b77c6c3c930ad33a3c1dfbdd76ff2c33b7b50"}, - {file = "simplejson-3.19.2-cp36-cp36m-win_amd64.whl", hash = "sha256:92c4a4a2b1f4846cd4364855cbac83efc48ff5a7d7c06ba014c792dd96483f6f"}, - {file = "simplejson-3.19.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0d551dc931638e2102b8549836a1632e6e7cf620af3d093a7456aa642bff601d"}, - {file = "simplejson-3.19.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73a8a4653f2e809049999d63530180d7b5a344b23a793502413ad1ecea9a0290"}, - {file = "simplejson-3.19.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:40847f617287a38623507d08cbcb75d51cf9d4f9551dd6321df40215128325a3"}, - {file = "simplejson-3.19.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be893258d5b68dd3a8cba8deb35dc6411db844a9d35268a8d3793b9d9a256f80"}, - {file = "simplejson-3.19.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9eb3cff1b7d71aa50c89a0536f469cb8d6dcdd585d8f14fb8500d822f3bdee4"}, - {file = 
"simplejson-3.19.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d0f402e787e6e7ee7876c8b05e2fe6464820d9f35ba3f172e95b5f8b699f6c7f"}, - {file = "simplejson-3.19.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:fbbcc6b0639aa09b9649f36f1bcb347b19403fe44109948392fbb5ea69e48c3e"}, - {file = "simplejson-3.19.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:2fc697be37585eded0c8581c4788fcfac0e3f84ca635b73a5bf360e28c8ea1a2"}, - {file = "simplejson-3.19.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b0a3eb6dd39cce23801a50c01a0976971498da49bc8a0590ce311492b82c44b"}, - {file = "simplejson-3.19.2-cp37-cp37m-win32.whl", hash = "sha256:49f9da0d6cd17b600a178439d7d2d57c5ef01f816b1e0e875e8e8b3b42db2693"}, - {file = "simplejson-3.19.2-cp37-cp37m-win_amd64.whl", hash = "sha256:c87c22bd6a987aca976e3d3e23806d17f65426191db36d40da4ae16a6a494cbc"}, - {file = "simplejson-3.19.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9e4c166f743bb42c5fcc60760fb1c3623e8fda94f6619534217b083e08644b46"}, - {file = "simplejson-3.19.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0a48679310e1dd5c9f03481799311a65d343748fe86850b7fb41df4e2c00c087"}, - {file = "simplejson-3.19.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0521e0f07cb56415fdb3aae0bbd8701eb31a9dfef47bb57206075a0584ab2a2"}, - {file = "simplejson-3.19.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d2d5119b1d7a1ed286b8af37357116072fc96700bce3bec5bb81b2e7057ab41"}, - {file = "simplejson-3.19.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c1467d939932901a97ba4f979e8f2642415fcf02ea12f53a4e3206c9c03bc17"}, - {file = "simplejson-3.19.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49aaf4546f6023c44d7e7136be84a03a4237f0b2b5fb2b17c3e3770a758fc1a0"}, - {file = "simplejson-3.19.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60848ab779195b72382841fc3fa4f71698a98d9589b0a081a9399904487b5832"}, - {file = "simplejson-3.19.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0436a70d8eb42bea4fe1a1c32d371d9bb3b62c637969cb33970ad624d5a3336a"}, - {file = "simplejson-3.19.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:49e0e3faf3070abdf71a5c80a97c1afc059b4f45a5aa62de0c2ca0444b51669b"}, - {file = "simplejson-3.19.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ff836cd4041e16003549449cc0a5e372f6b6f871eb89007ab0ee18fb2800fded"}, - {file = "simplejson-3.19.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3848427b65e31bea2c11f521b6fc7a3145d6e501a1038529da2391aff5970f2f"}, - {file = "simplejson-3.19.2-cp38-cp38-win32.whl", hash = "sha256:3f39bb1f6e620f3e158c8b2eaf1b3e3e54408baca96a02fe891794705e788637"}, - {file = "simplejson-3.19.2-cp38-cp38-win_amd64.whl", hash = "sha256:0405984f3ec1d3f8777c4adc33eac7ab7a3e629f3b1c05fdded63acc7cf01137"}, - {file = "simplejson-3.19.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:445a96543948c011a3a47c8e0f9d61e9785df2544ea5be5ab3bc2be4bd8a2565"}, - {file = "simplejson-3.19.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4a8c3cc4f9dfc33220246760358c8265dad6e1104f25f0077bbca692d616d358"}, - {file = "simplejson-3.19.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af9c7e6669c4d0ad7362f79cb2ab6784d71147503e62b57e3d95c4a0f222c01c"}, - {file = "simplejson-3.19.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:064300a4ea17d1cd9ea1706aa0590dcb3be81112aac30233823ee494f02cb78a"}, - 
{file = "simplejson-3.19.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9453419ea2ab9b21d925d0fd7e3a132a178a191881fab4169b6f96e118cc25bb"}, - {file = "simplejson-3.19.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e038c615b3906df4c3be8db16b3e24821d26c55177638ea47b3f8f73615111c"}, - {file = "simplejson-3.19.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16ca9c90da4b1f50f089e14485db8c20cbfff2d55424062791a7392b5a9b3ff9"}, - {file = "simplejson-3.19.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1018bd0d70ce85f165185d2227c71e3b1e446186f9fa9f971b69eee223e1e3cd"}, - {file = "simplejson-3.19.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e8dd53a8706b15bc0e34f00e6150fbefb35d2fd9235d095b4f83b3c5ed4fa11d"}, - {file = "simplejson-3.19.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:2d022b14d7758bfb98405672953fe5c202ea8a9ccf9f6713c5bd0718eba286fd"}, - {file = "simplejson-3.19.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:febffa5b1eda6622d44b245b0685aff6fb555ce0ed734e2d7b1c3acd018a2cff"}, - {file = "simplejson-3.19.2-cp39-cp39-win32.whl", hash = "sha256:4edcd0bf70087b244ba77038db23cd98a1ace2f91b4a3ecef22036314d77ac23"}, - {file = "simplejson-3.19.2-cp39-cp39-win_amd64.whl", hash = "sha256:aad7405c033d32c751d98d3a65801e2797ae77fac284a539f6c3a3e13005edc4"}, - {file = "simplejson-3.19.2-py3-none-any.whl", hash = "sha256:bcedf4cae0d47839fee7de344f96b5694ca53c786f28b5f773d4f0b265a159eb"}, - {file = "simplejson-3.19.2.tar.gz", hash = "sha256:9eb442a2442ce417801c912df68e1f6ccfcd41577ae7274953ab3ad24ef7d82c"}, +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.5" +files = [ + {file = "simplejson-3.19.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:f39caec26007a2d0efab6b8b1d74873ede9351962707afab622cc2285dd26ed0"}, + {file = "simplejson-3.19.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:83c87706265ae3028e8460d08b05f30254c569772e859e5ba61fe8af2c883468"}, + {file = "simplejson-3.19.3-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:0b5ddd2c7d1d3f4d23224bc8a04bbf1430ae9a8149c05b90f8fc610f7f857a23"}, + {file = "simplejson-3.19.3-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:ad0e0b1ce9bd3edb5cf64b5b5b76eacbfdac8c5367153aeeec8a8b1407f68342"}, + {file = "simplejson-3.19.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:93be280fc69a952c76e261036312c20b910e7fa9e234f1d89bdfe3fa34f8a023"}, + {file = "simplejson-3.19.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:6d43e24b88c80f997081503f693be832fc90854f278df277dd54f8a4c847ab61"}, + {file = "simplejson-3.19.3-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:2876027ebdd599d730d36464debe84619b0368e9a642ca6e7c601be55aed439e"}, + {file = "simplejson-3.19.3-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:0766ca6222b410e08e0053a0dda3606cafb3973d5d00538307f631bb59743396"}, + {file = "simplejson-3.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:50d8b742d74c449c4dcac570d08ce0f21f6a149d2d9cf7652dbf2ba9a1bc729a"}, + {file = "simplejson-3.19.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd011fc3c1d88b779645495fdb8189fb318a26981eebcce14109460e062f209b"}, + {file = "simplejson-3.19.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:637c4d4b81825c1f4d651e56210bd35b5604034b192b02d2d8f17f7ce8c18f42"}, + {file = "simplejson-3.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2f56eb03bc9e432bb81adc8ecff2486d39feb371abb442964ffb44f6db23b332"}, + {file = "simplejson-3.19.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ef59a53be400c1fad2c914b8d74c9d42384fed5174f9321dd021b7017fd40270"}, + {file = "simplejson-3.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72e8abbc86fcac83629a030888b45fed3a404d54161118be52cb491cd6975d3e"}, + {file = "simplejson-3.19.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8efb03ca77bd7725dfacc9254df00d73e6f43013cf39bd37ef1a8ed0ebb5165"}, + {file = "simplejson-3.19.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:add8850db04b98507a8b62d248a326ecc8561e6d24336d1ca5c605bbfaab4cad"}, + {file = "simplejson-3.19.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fc3dc9fb413fc34c396f52f4c87de18d0bd5023804afa8ab5cc224deeb6a9900"}, + {file = "simplejson-3.19.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4dfa420bb9225dd33b6efdabde7c6a671b51150b9b1d9c4e5cd74d3b420b3fe1"}, + {file = "simplejson-3.19.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7b5c472099b39b274dcde27f1113db8d818c9aa3ba8f78cbb8ad04a4c1ac2118"}, + {file = "simplejson-3.19.3-cp310-cp310-win32.whl", hash = "sha256:817abad79241ed4a507b3caf4d3f2be5079f39d35d4c550a061988986bffd2ec"}, + {file = "simplejson-3.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:dd5b9b1783e14803e362a558680d88939e830db2466f3fa22df5c9319f8eea94"}, + {file = "simplejson-3.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e88abff510dcff903a18d11c2a75f9964e768d99c8d147839913886144b2065e"}, + {file = "simplejson-3.19.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:934a50a614fb831614db5dbfba35127ee277624dda4d15895c957d2f5d48610c"}, + {file = "simplejson-3.19.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:212fce86a22188b0c7f53533b0f693ea9605c1a0f02c84c475a30616f55a744d"}, + {file = "simplejson-3.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d9e8f836688a8fabe6a6b41b334aa550a6823f7b4ac3d3712fc0ad8655be9a8"}, + {file = "simplejson-3.19.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23228037dc5d41c36666384062904d74409a62f52283d9858fa12f4c22cffad1"}, + {file = "simplejson-3.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0791f64fed7d4abad639491f8a6b1ba56d3c604eb94b50f8697359b92d983f36"}, + {file = "simplejson-3.19.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4f614581b61a26fbbba232a1391f6cee82bc26f2abbb6a0b44a9bba25c56a1c"}, + {file = "simplejson-3.19.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1df0aaf1cb787fdf34484ed4a1f0c545efd8811f6028623290fef1a53694e597"}, + {file = "simplejson-3.19.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:951095be8d4451a7182403354c22ec2de3e513e0cc40408b689af08d02611588"}, + {file = "simplejson-3.19.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2a954b30810988feeabde843e3263bf187697e0eb5037396276db3612434049b"}, + {file = "simplejson-3.19.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c40df31a75de98db2cdfead6074d4449cd009e79f54c1ebe5e5f1f153c68ad20"}, + {file = "simplejson-3.19.3-cp311-cp311-win32.whl", hash = "sha256:7e2a098c21ad8924076a12b6c178965d88a0ad75d1de67e1afa0a66878f277a5"}, + {file = 
"simplejson-3.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:c9bedebdc5fdad48af8783022bae307746d54006b783007d1d3c38e10872a2c6"}, + {file = "simplejson-3.19.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:66a0399e21c2112acacfebf3d832ebe2884f823b1c7e6d1363f2944f1db31a99"}, + {file = "simplejson-3.19.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6ef9383c5e05f445be60f1735c1816163c874c0b1ede8bb4390aff2ced34f333"}, + {file = "simplejson-3.19.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:42e5acf80d4d971238d4df97811286a044d720693092b20a56d5e56b7dcc5d09"}, + {file = "simplejson-3.19.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0b0efc7279d768db7c74d3d07f0b5c81280d16ae3fb14e9081dc903e8360771"}, + {file = "simplejson-3.19.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0552eb06e7234da892e1d02365cd2b7b2b1f8233aa5aabdb2981587b7cc92ea0"}, + {file = "simplejson-3.19.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf6a3b9a7d7191471b464fe38f684df10eb491ec9ea454003edb45a011ab187"}, + {file = "simplejson-3.19.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7017329ca8d4dca94ad5e59f496e5fc77630aecfc39df381ffc1d37fb6b25832"}, + {file = "simplejson-3.19.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:67a20641afebf4cfbcff50061f07daad1eace6e7b31d7622b6fa2c40d43900ba"}, + {file = "simplejson-3.19.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:dd6a7dabcc4c32daf601bc45e01b79175dde4b52548becea4f9545b0a4428169"}, + {file = "simplejson-3.19.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:08f9b443a94e72dd02c87098c96886d35790e79e46b24e67accafbf13b73d43b"}, + {file = "simplejson-3.19.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fa97278ae6614346b5ca41a45a911f37a3261b57dbe4a00602048652c862c28b"}, + {file = "simplejson-3.19.3-cp312-cp312-win32.whl", hash = "sha256:ef28c3b328d29b5e2756903aed888960bc5df39b4c2eab157ae212f70ed5bf74"}, + {file = "simplejson-3.19.3-cp312-cp312-win_amd64.whl", hash = "sha256:1e662336db50ad665777e6548b5076329a94a0c3d4a0472971c588b3ef27de3a"}, + {file = "simplejson-3.19.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:0959e6cb62e3994b5a40e31047ff97ef5c4138875fae31659bead691bed55896"}, + {file = "simplejson-3.19.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7a7bfad839c624e139a4863007233a3f194e7c51551081f9789cba52e4da5167"}, + {file = "simplejson-3.19.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:afab2f7f2486a866ff04d6d905e9386ca6a231379181a3838abce1f32fbdcc37"}, + {file = "simplejson-3.19.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d00313681015ac498e1736b304446ee6d1c72c5b287cd196996dad84369998f7"}, + {file = "simplejson-3.19.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d936ae682d5b878af9d9eb4d8bb1fdd5e41275c8eb59ceddb0aeed857bb264a2"}, + {file = "simplejson-3.19.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01c6657485393f2e9b8177c77a7634f13ebe70d5e6de150aae1677d91516ce6b"}, + {file = "simplejson-3.19.3-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a6a750d3c7461b1c47cfc6bba8d9e57a455e7c5f80057d2a82f738040dd1129"}, + {file = "simplejson-3.19.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:ea7a4a998c87c5674a27089e022110a1a08a7753f21af3baf09efe9915c23c3c"}, + {file = "simplejson-3.19.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6300680d83a399be2b8f3b0ef7ef90b35d2a29fe6e9c21438097e0938bbc1564"}, + {file = "simplejson-3.19.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:ab69f811a660c362651ae395eba8ce84f84c944cea0df5718ea0ba9d1e4e7252"}, + {file = "simplejson-3.19.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:256e09d0f94d9c3d177d9e95fd27a68c875a4baa2046633df387b86b652f5747"}, + {file = "simplejson-3.19.3-cp313-cp313-win32.whl", hash = "sha256:2c78293470313aefa9cfc5e3f75ca0635721fb016fb1121c1c5b0cb8cc74712a"}, + {file = "simplejson-3.19.3-cp313-cp313-win_amd64.whl", hash = "sha256:3bbcdc438dc1683b35f7a8dc100960c721f922f9ede8127f63bed7dfded4c64c"}, + {file = "simplejson-3.19.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:89b35433186e977fa86ff1fd179c1fadff39cfa3afa1648dab0b6ca53153acd9"}, + {file = "simplejson-3.19.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d43c2d7504eda566c50203cdc9dc043aff6f55f1b7dae0dcd79dfefef9159d1c"}, + {file = "simplejson-3.19.3-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6890ff9cf0bd2e1d487e2a8869ebd620a44684c0a9667fa5ee751d099d5d84c8"}, + {file = "simplejson-3.19.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1069143a8fb3905e1bc0696c62be7e3adf812e9f1976ac9ae15b05112ff57cc9"}, + {file = "simplejson-3.19.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb324bb903330cbb35d87cce367a12631cd5720afa06e5b9c906483970946da6"}, + {file = "simplejson-3.19.3-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:0a32859d45d7b85fb803bb68f6bee14526991a1190269116c33399fa0daf9bbf"}, + {file = "simplejson-3.19.3-cp36-cp36m-musllinux_1_2_i686.whl", hash = "sha256:23833ee7e791ec968b744dfee2a2d39df7152050051096caf4296506d75608d8"}, + {file = "simplejson-3.19.3-cp36-cp36m-musllinux_1_2_ppc64le.whl", hash = "sha256:d73efb03c5b39249c82488a994f0998f9e4399e3d085209d2120503305ba77a8"}, + {file = "simplejson-3.19.3-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:7923878b7a0142d39763ec2dbecff3053c1bedd3653585a8474666e420fe83f5"}, + {file = "simplejson-3.19.3-cp36-cp36m-win32.whl", hash = "sha256:7355c7203353c36d46c4e7b6055293b3d2be097bbc5e2874a2b8a7259f0325dd"}, + {file = "simplejson-3.19.3-cp36-cp36m-win_amd64.whl", hash = "sha256:d1b8b4d6379fe55f471914345fe6171d81a18649dacf3248abfc9c349b4442eb"}, + {file = "simplejson-3.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d36608557b4dcd7a62c29ad4cd7c5a1720bbf7dc942eff9dc42d2c542a5f042d"}, + {file = "simplejson-3.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7137e69c6781ecf23afab064be94a277236c9cba31aa48ff1a0ec3995c69171e"}, + {file = "simplejson-3.19.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76f8c28fe2d426182405b18ddf3001fce47835a557dc15c3d8bdea01c03361da"}, + {file = "simplejson-3.19.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff7bc1bbdaa3e487c9469128bf39408e91f5573901cb852e03af378d3582c52d"}, + {file = "simplejson-3.19.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0782cb9bf827f0c488b6aa0f2819f618308a3caf2973cfd792e45d631bec4db"}, + {file = 
"simplejson-3.19.3-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:6fea0716c593dabb4392c4996d4e902a83b2428e6da82938cf28a523a11eb277"}, + {file = "simplejson-3.19.3-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:8f41bb5370b34f63171e65fdb00e12be1d83675cecb23e627df26f4c88dfc021"}, + {file = "simplejson-3.19.3-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:37105d1d708365b91165e1a6e505bdecc88637091348cf4b6adcdcb4f5a5fb8b"}, + {file = "simplejson-3.19.3-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:b9198c1f1f8910a3b86b60f4fe2556d9d28d3fefe35bffe6be509a27402e694d"}, + {file = "simplejson-3.19.3-cp37-cp37m-win32.whl", hash = "sha256:bc164f32dd9691e7082ce5df24b4cf8c6c394bbf9bdeeb5d843127cd07ab8ad2"}, + {file = "simplejson-3.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:1bd41f2cb1a2c57656ceff67b12d005cb255c728265e222027ad73193a04005a"}, + {file = "simplejson-3.19.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0733ecd95ae03ae718ec74aad818f5af5f3155d596f7b242acbc1621e765e5fb"}, + {file = "simplejson-3.19.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4a0710d1a5e41c4f829caa1572793dd3130c8d65c2b194c24ff29c4c305c26e0"}, + {file = "simplejson-3.19.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1a53a07320c5ff574d8b1a89c937ce33608832f166f39dff0581ac43dc979abd"}, + {file = "simplejson-3.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1773cabfba66a6337b547e45dafbd471b09487370bcab75bd28f626520410d29"}, + {file = "simplejson-3.19.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7c0104b4b7d2c75ccedbf1d9d5a3bd2daa75e51053935a44ba012e2fd4c43752"}, + {file = "simplejson-3.19.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c49eeb94b8f09dc8a5843c156a22b8bde6aa1ddc65ca8ddc62dddcc001e6a2d"}, + {file = "simplejson-3.19.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dc5c1a85ff388e98ea877042daec3d157b6db0d85bac6ba5498034689793e7e"}, + {file = "simplejson-3.19.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:49549e3d81ab4a58424405aa545602674d8c35c20e986b42bb8668e782a94bac"}, + {file = "simplejson-3.19.3-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:e1a1452ad5723ff129b081e3c8aa4ba56b8734fee4223355ed7b815a7ece69bc"}, + {file = "simplejson-3.19.3-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:d0d5a63f1768fed7e78cf55712dee81f5a345e34d34224f3507ebf71df2b754d"}, + {file = "simplejson-3.19.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:7e062767ac165df9a46963f5735aa4eee0089ec1e48b3f2ec46182754b96f55e"}, + {file = "simplejson-3.19.3-cp38-cp38-win32.whl", hash = "sha256:56134bbafe458a7b21f6fddbf889d36bec6d903718f4430768e3af822f8e27c2"}, + {file = "simplejson-3.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:bcde83a553a96dc7533736c547bddaa35414a2566ab0ecf7d3964fc4bdb84c11"}, + {file = "simplejson-3.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b5587feda2b65a79da985ae6d116daf6428bf7489992badc29fc96d16cd27b05"}, + {file = "simplejson-3.19.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e0d2b00ecbcd1a3c5ea1abc8bb99a26508f758c1759fd01c3be482a3655a176f"}, + {file = "simplejson-3.19.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:32a3ada8f3ea41db35e6d37b86dade03760f804628ec22e4fe775b703d567426"}, + {file = "simplejson-3.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f455672f4738b0f47183c5896e3606cd65c9ddee3805a4d18e8c96aa3f47c84"}, + 
{file = "simplejson-3.19.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b737a5fefedb8333fa50b8db3dcc9b1d18fd6c598f89fa7debff8b46bf4e511"}, + {file = "simplejson-3.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb47ee773ce67476a960e2db4a0a906680c54f662521550828c0cc57d0099426"}, + {file = "simplejson-3.19.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eed8cd98a7b24861da9d3d937f5fbfb6657350c547528a117297fe49e3960667"}, + {file = "simplejson-3.19.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:619756f1dd634b5bdf57d9a3914300526c3b348188a765e45b8b08eabef0c94e"}, + {file = "simplejson-3.19.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:dd7230d061e755d60a4d5445bae854afe33444cdb182f3815cff26ac9fb29a15"}, + {file = "simplejson-3.19.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:101a3c8392028cd704a93c7cba8926594e775ca3c91e0bee82144e34190903f1"}, + {file = "simplejson-3.19.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e557712fc79f251673aeb3fad3501d7d4da3a27eff0857af2e1d1afbbcf6685"}, + {file = "simplejson-3.19.3-cp39-cp39-win32.whl", hash = "sha256:0bc5544e3128891bf613b9f71813ee2ec9c11574806f74dd8bb84e5e95bf64a2"}, + {file = "simplejson-3.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:06662392e4913dc8846d6a71a6d5de86db5fba244831abe1dd741d62a4136764"}, + {file = "simplejson-3.19.3-py3-none-any.whl", hash = "sha256:49cc4c7b940d43bd12bf87ec63f28cbc4964fc4e12c031cc8cd01650f43eb94e"}, + {file = "simplejson-3.19.3.tar.gz", hash = "sha256:8e086896c36210ab6050f2f9f095a5f1e03c83fa0e7f296d6cba425411364680"}, ] [[package]] @@ -5604,37 +5745,37 @@ files = [ [[package]] name = "snowflake-connector-python" -version = "3.11.0" +version = "3.12.0" description = "Snowflake Connector for Python" optional = true python-versions = ">=3.8" files = [ - {file = "snowflake_connector_python-3.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0be9e2e35c7cf98df4ee454f1a00b7d1ff541ce46582d9b9ec51928e1583683c"}, - {file = "snowflake_connector_python-3.11.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:e01aa2f65bd7374a11a7d74c30d4a78938bbf60db512fc170bd25fc1b385566b"}, - {file = "snowflake_connector_python-3.11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a284c275929f81c5b53160c0d0ee447ee20b63af0493c87f3dd39faf3178f59"}, - {file = "snowflake_connector_python-3.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:686a50bf1e7d2cf89db7319a29d08816ea57039fcf05ca3f3bf3f92dc25bed40"}, - {file = "snowflake_connector_python-3.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:eae346b87906db2095f1c830ba105b529a211ecd0c0b1e43d8775fc49e7e476c"}, - {file = "snowflake_connector_python-3.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:415992f074b51712770c3dbd7a6b5a95b5dd04ffe02fc51ac8446e193771436d"}, - {file = "snowflake_connector_python-3.11.0-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:e55eca3ff74fb33ea21455369e171ad61ef31eb916cbbbdab7ccb90cb98ad8d0"}, - {file = "snowflake_connector_python-3.11.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa48b1f2a124098745a33ee93e34d85a3dfb60fa3d2d7ec5efee4aa17bb05053"}, - {file = "snowflake_connector_python-3.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96b21a062fc7aacb49202c8502239c0728319a96834a9fca1b6666a51e515dcc"}, - {file = 
"snowflake_connector_python-3.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:ae890352e9e09e2084fd13647a664a31343bfa58d9aa41770e9ec3b810f9bc2c"}, - {file = "snowflake_connector_python-3.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e8f5c376b2368082819126f566551e451d51c95cc2febac45377026d44a401b0"}, - {file = "snowflake_connector_python-3.11.0-cp312-cp312-macosx_11_0_x86_64.whl", hash = "sha256:56c29839cbdf4778b997a96dacb849c3b374b7818c60eefe87b67debc9672f59"}, - {file = "snowflake_connector_python-3.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c203a035e417a386d7b09e977a198588471a768336f781b0561d09ed0f495edc"}, - {file = "snowflake_connector_python-3.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e906f517d2e79cd19c04dddf3bba13a072755503516f6bcd55ae1122b6df7fdb"}, - {file = "snowflake_connector_python-3.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:3e5489b814f311425e3966c0434f686131e48886eb7e0a8606631695f3c4bd48"}, - {file = "snowflake_connector_python-3.11.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:507c70ccd87c37a813c6aab27fe97007fb96c5372a5a33cc4b9137acb0d921e1"}, - {file = "snowflake_connector_python-3.11.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:f238a3cb4522d2943861f38cb0c9650c08c02e45a38d4eefa27f22ad95748fb4"}, - {file = "snowflake_connector_python-3.11.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f797eb2552950cf04fe07304da714f1d473f7a0c1548cfbce5614c4b0a66e441"}, - {file = "snowflake_connector_python-3.11.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a525c60fd5922098eab2425bc3f63bb3df0f07dd54e02580505a6208f908d32"}, - {file = "snowflake_connector_python-3.11.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b61dbd3581c043b338c99caff900a9cce187c83333bafdf1d57c8c126366b4a"}, - {file = "snowflake_connector_python-3.11.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:13170b419a6c2b98e23c89a459e2576955e0bae4fd267e9f44fffad642aa3ecc"}, - {file = "snowflake_connector_python-3.11.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:18bc6fd9fd544d540b06d9e97e754d0053b5cb7e5d9266586b3df8f243ef97bc"}, - {file = "snowflake_connector_python-3.11.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd4120451e33a32fb8fa018c9cd3e56c370ab0702ffe93b4e68acdae92524c3c"}, - {file = "snowflake_connector_python-3.11.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26ed3b5537864ee9d72f313b18b80b76136b7838774ea5bc2b4f5e1df8e9b90"}, - {file = "snowflake_connector_python-3.11.0-cp39-cp39-win_amd64.whl", hash = "sha256:fc6afb35826e958949edb2d464e0d35ec46ef41b65546a311a333f4d0a7d07a6"}, - {file = "snowflake_connector_python-3.11.0.tar.gz", hash = "sha256:3169c014a03e5f5855112605e393897a552e558953c69f25a02e33b1998864d0"}, + {file = "snowflake_connector_python-3.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:edf28df8be24845cfcec653b160d2b8c048d5cb0c85b051f4957f0b0aae1e493"}, + {file = "snowflake_connector_python-3.12.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:c2bbdbbb028d7d542815ed68b28200728aa6707b9354e3a447fdc8c7a34bcdce"}, + {file = "snowflake_connector_python-3.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92c9a19a23033df709e63baa6ccdf6eff65210143a8c9c67a0a24bba862034b"}, + {file = "snowflake_connector_python-3.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8d33d845e4c68d33e73a9f64100b53342c18607ac25c4f2a27dbed2078078d12"}, + {file = "snowflake_connector_python-3.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:c1d43bfaa885aab712f14f9ced232abe5023adfca7fbf7a7a0768a162523e9d6"}, + {file = "snowflake_connector_python-3.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6a0cc03fb44808f3ddc464ee272f141564c8daea14475e1df5c2a54c7acb2ddf"}, + {file = "snowflake_connector_python-3.12.0-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:564752d22accc43351b50f676b03aa9f2b441be2641e3cf9a7790faf54eff210"}, + {file = "snowflake_connector_python-3.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27d6a1a180832c7b551d38df1094a70fb79917f90c57893b9ce7e219362f6c1"}, + {file = "snowflake_connector_python-3.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60675fd83022daef40541d717d006695149c512b283e35741b61a4f48ba537e9"}, + {file = "snowflake_connector_python-3.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:a567b937b0179d1e95a8ad7200943d286f38d0e76df90af10f747ed9149dd681"}, + {file = "snowflake_connector_python-3.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:dc333fcfc383a8cab8bd7e890a7c76703e26598925a05954c75d2c50bff06071"}, + {file = "snowflake_connector_python-3.12.0-cp312-cp312-macosx_11_0_x86_64.whl", hash = "sha256:3c06bfba4a329fd4ec3feba0ada7b31f86ed4e156a9766bced52c2814d001fd2"}, + {file = "snowflake_connector_python-3.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:acf84b07dd2f22adfaa7d52ccd6be1722bd5a0e2b1a9b08681c3851bea05768f"}, + {file = "snowflake_connector_python-3.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:019b8a61e5af689451d502df2af8793fc6f20b5b0a3548fd8ad03aa8b62e7f2d"}, + {file = "snowflake_connector_python-3.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:45f9b9678694f10571c1f7ec7d0d741663ad0ff61a71ae53aa71be47faa19978"}, + {file = "snowflake_connector_python-3.12.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:21cbaef51fbed719de01155079df3d004cee963d3723c1ebdb8980923f893e04"}, + {file = "snowflake_connector_python-3.12.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:c86d4a7d49f42ea0bb34218cb49c401ba995892abcfb509ea749cd0a74a8b28a"}, + {file = "snowflake_connector_python-3.12.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1aa34aec0f96d7fc7271e38c68ee0d58529875d05e084afb4fc8f09b694643c4"}, + {file = "snowflake_connector_python-3.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2f621030b26a220711c64518e00059736b79c1da53afa6a8ce68b31c1941014"}, + {file = "snowflake_connector_python-3.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:368e46f1d079056e028bfe8f7171fabef62eb00bcf590df294220b7a5be5d56c"}, + {file = "snowflake_connector_python-3.12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2735e16fffded0900f7484030613b79699afc1ed4e5cff086bd139a0ce965594"}, + {file = "snowflake_connector_python-3.12.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:c06a8e2e12284b4a4d462d0073fb4983e90ad2d6a2382926f9e3409f06c81d0b"}, + {file = "snowflake_connector_python-3.12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:880e6e95171cd7374a86da14132fdfc4b622665f134561f4d43e3f35bdacf67d"}, + {file = "snowflake_connector_python-3.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e245b84c164433454ce49d78e6bcf5c2e62e25657358bf34ab533166e588f80"}, + {file = 
"snowflake_connector_python-3.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:85a5565b8813d164f33f32a825a70443008fe009aae050307f128a1ca892f9ed"}, + {file = "snowflake_connector_python-3.12.0.tar.gz", hash = "sha256:320e0b6f8cd8556e19c8b87249c931700238b2958313afc7a33108d67da87d82"}, ] [package.dependencies] @@ -5659,7 +5800,7 @@ urllib3 = {version = ">=1.21.1,<2.0.0", markers = "python_version < \"3.10\""} [package.extras] development = ["Cython", "coverage", "more-itertools", "numpy (<1.27.0)", "pendulum (!=2.1.1)", "pexpect", "pytest (<7.5.0)", "pytest-cov", "pytest-rerunfailures", "pytest-timeout", "pytest-xdist", "pytzdata"] pandas = ["pandas (>=1.0.0,<3.0.0)", "pyarrow"] -secure-local-storage = ["keyring (>=23.1.0,<25.0.0)"] +secure-local-storage = ["keyring (>=23.1.0,<26.0.0)"] [[package]] name = "snowflake-sqlalchemy" @@ -5693,71 +5834,71 @@ files = [ [[package]] name = "soupsieve" -version = "2.5" +version = "2.6" description = "A modern CSS selector implementation for Beautiful Soup." optional = true python-versions = ">=3.8" files = [ - {file = "soupsieve-2.5-py3-none-any.whl", hash = "sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7"}, - {file = "soupsieve-2.5.tar.gz", hash = "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690"}, + {file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"}, + {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"}, ] [[package]] name = "sqlalchemy" -version = "2.0.31" +version = "2.0.32" description = "Database Abstraction Library" optional = true python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.31-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f2a213c1b699d3f5768a7272de720387ae0122f1becf0901ed6eaa1abd1baf6c"}, - {file = "SQLAlchemy-2.0.31-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9fea3d0884e82d1e33226935dac990b967bef21315cbcc894605db3441347443"}, - {file = "SQLAlchemy-2.0.31-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3ad7f221d8a69d32d197e5968d798217a4feebe30144986af71ada8c548e9fa"}, - {file = "SQLAlchemy-2.0.31-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f2bee229715b6366f86a95d497c347c22ddffa2c7c96143b59a2aa5cc9eebbc"}, - {file = "SQLAlchemy-2.0.31-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cd5b94d4819c0c89280b7c6109c7b788a576084bf0a480ae17c227b0bc41e109"}, - {file = "SQLAlchemy-2.0.31-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:750900a471d39a7eeba57580b11983030517a1f512c2cb287d5ad0fcf3aebd58"}, - {file = "SQLAlchemy-2.0.31-cp310-cp310-win32.whl", hash = "sha256:7bd112be780928c7f493c1a192cd8c5fc2a2a7b52b790bc5a84203fb4381c6be"}, - {file = "SQLAlchemy-2.0.31-cp310-cp310-win_amd64.whl", hash = "sha256:5a48ac4d359f058474fadc2115f78a5cdac9988d4f99eae44917f36aa1476327"}, - {file = "SQLAlchemy-2.0.31-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f68470edd70c3ac3b6cd5c2a22a8daf18415203ca1b036aaeb9b0fb6f54e8298"}, - {file = "SQLAlchemy-2.0.31-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e2c38c2a4c5c634fe6c3c58a789712719fa1bf9b9d6ff5ebfce9a9e5b89c1ca"}, - {file = "SQLAlchemy-2.0.31-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd15026f77420eb2b324dcb93551ad9c5f22fab2c150c286ef1dc1160f110203"}, - {file = "SQLAlchemy-2.0.31-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2196208432deebdfe3b22185d46b08f00ac9d7b01284e168c212919891289396"}, - {file = "SQLAlchemy-2.0.31-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:352b2770097f41bff6029b280c0e03b217c2dcaddc40726f8f53ed58d8a85da4"}, - {file = "SQLAlchemy-2.0.31-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:56d51ae825d20d604583f82c9527d285e9e6d14f9a5516463d9705dab20c3740"}, - {file = "SQLAlchemy-2.0.31-cp311-cp311-win32.whl", hash = "sha256:6e2622844551945db81c26a02f27d94145b561f9d4b0c39ce7bfd2fda5776dac"}, - {file = "SQLAlchemy-2.0.31-cp311-cp311-win_amd64.whl", hash = "sha256:ccaf1b0c90435b6e430f5dd30a5aede4764942a695552eb3a4ab74ed63c5b8d3"}, - {file = "SQLAlchemy-2.0.31-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3b74570d99126992d4b0f91fb87c586a574a5872651185de8297c6f90055ae42"}, - {file = "SQLAlchemy-2.0.31-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f77c4f042ad493cb8595e2f503c7a4fe44cd7bd59c7582fd6d78d7e7b8ec52c"}, - {file = "SQLAlchemy-2.0.31-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd1591329333daf94467e699e11015d9c944f44c94d2091f4ac493ced0119449"}, - {file = "SQLAlchemy-2.0.31-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74afabeeff415e35525bf7a4ecdab015f00e06456166a2eba7590e49f8db940e"}, - {file = "SQLAlchemy-2.0.31-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b9c01990d9015df2c6f818aa8f4297d42ee71c9502026bb074e713d496e26b67"}, - {file = "SQLAlchemy-2.0.31-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:66f63278db425838b3c2b1c596654b31939427016ba030e951b292e32b99553e"}, - {file = "SQLAlchemy-2.0.31-cp312-cp312-win32.whl", hash = "sha256:0b0f658414ee4e4b8cbcd4a9bb0fd743c5eeb81fc858ca517217a8013d282c96"}, - {file = "SQLAlchemy-2.0.31-cp312-cp312-win_amd64.whl", hash = "sha256:fa4b1af3e619b5b0b435e333f3967612db06351217c58bfb50cee5f003db2a5a"}, - {file = "SQLAlchemy-2.0.31-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f43e93057cf52a227eda401251c72b6fbe4756f35fa6bfebb5d73b86881e59b0"}, - {file = "SQLAlchemy-2.0.31-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d337bf94052856d1b330d5fcad44582a30c532a2463776e1651bd3294ee7e58b"}, - {file = "SQLAlchemy-2.0.31-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c06fb43a51ccdff3b4006aafee9fcf15f63f23c580675f7734245ceb6b6a9e05"}, - {file = "SQLAlchemy-2.0.31-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:b6e22630e89f0e8c12332b2b4c282cb01cf4da0d26795b7eae16702a608e7ca1"}, - {file = "SQLAlchemy-2.0.31-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:79a40771363c5e9f3a77f0e28b3302801db08040928146e6808b5b7a40749c88"}, - {file = "SQLAlchemy-2.0.31-cp37-cp37m-win32.whl", hash = "sha256:501ff052229cb79dd4c49c402f6cb03b5a40ae4771efc8bb2bfac9f6c3d3508f"}, - {file = "SQLAlchemy-2.0.31-cp37-cp37m-win_amd64.whl", hash = "sha256:597fec37c382a5442ffd471f66ce12d07d91b281fd474289356b1a0041bdf31d"}, - {file = "SQLAlchemy-2.0.31-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dc6d69f8829712a4fd799d2ac8d79bdeff651c2301b081fd5d3fe697bd5b4ab9"}, - {file = "SQLAlchemy-2.0.31-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:23b9fbb2f5dd9e630db70fbe47d963c7779e9c81830869bd7d137c2dc1ad05fb"}, - {file = "SQLAlchemy-2.0.31-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a21c97efcbb9f255d5c12a96ae14da873233597dfd00a3a0c4ce5b3e5e79704"}, - {file = "SQLAlchemy-2.0.31-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:26a6a9837589c42b16693cf7bf836f5d42218f44d198f9343dd71d3164ceeeac"}, - {file = "SQLAlchemy-2.0.31-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc251477eae03c20fae8db9c1c23ea2ebc47331bcd73927cdcaecd02af98d3c3"}, - {file = "SQLAlchemy-2.0.31-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:2fd17e3bb8058359fa61248c52c7b09a97cf3c820e54207a50af529876451808"}, - {file = "SQLAlchemy-2.0.31-cp38-cp38-win32.whl", hash = "sha256:c76c81c52e1e08f12f4b6a07af2b96b9b15ea67ccdd40ae17019f1c373faa227"}, - {file = "SQLAlchemy-2.0.31-cp38-cp38-win_amd64.whl", hash = "sha256:4b600e9a212ed59355813becbcf282cfda5c93678e15c25a0ef896b354423238"}, - {file = "SQLAlchemy-2.0.31-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b6cf796d9fcc9b37011d3f9936189b3c8074a02a4ed0c0fbbc126772c31a6d4"}, - {file = "SQLAlchemy-2.0.31-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:78fe11dbe37d92667c2c6e74379f75746dc947ee505555a0197cfba9a6d4f1a4"}, - {file = "SQLAlchemy-2.0.31-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fc47dc6185a83c8100b37acda27658fe4dbd33b7d5e7324111f6521008ab4fe"}, - {file = "SQLAlchemy-2.0.31-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a41514c1a779e2aa9a19f67aaadeb5cbddf0b2b508843fcd7bafdf4c6864005"}, - {file = "SQLAlchemy-2.0.31-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:afb6dde6c11ea4525318e279cd93c8734b795ac8bb5dda0eedd9ebaca7fa23f1"}, - {file = "SQLAlchemy-2.0.31-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3f9faef422cfbb8fd53716cd14ba95e2ef655400235c3dfad1b5f467ba179c8c"}, - {file = "SQLAlchemy-2.0.31-cp39-cp39-win32.whl", hash = "sha256:fc6b14e8602f59c6ba893980bea96571dd0ed83d8ebb9c4479d9ed5425d562e9"}, - {file = "SQLAlchemy-2.0.31-cp39-cp39-win_amd64.whl", hash = "sha256:3cb8a66b167b033ec72c3812ffc8441d4e9f5f78f5e31e54dcd4c90a4ca5bebc"}, - {file = "SQLAlchemy-2.0.31-py3-none-any.whl", hash = "sha256:69f3e3c08867a8e4856e92d7afb618b95cdee18e0bc1647b77599722c9a28911"}, - {file = "SQLAlchemy-2.0.31.tar.gz", hash = "sha256:b607489dd4a54de56984a0c7656247504bd5523d9d0ba799aef59d4add009484"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0c9045ecc2e4db59bfc97b20516dfdf8e41d910ac6fb667ebd3a79ea54084619"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1467940318e4a860afd546ef61fefb98a14d935cd6817ed07a228c7f7c62f389"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5954463675cb15db8d4b521f3566a017c8789222b8316b1e6934c811018ee08b"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:167e7497035c303ae50651b351c28dc22a40bb98fbdb8468cdc971821b1ae533"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b27dfb676ac02529fb6e343b3a482303f16e6bc3a4d868b73935b8792edb52d0"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bf2360a5e0f7bd75fa80431bf8ebcfb920c9f885e7956c7efde89031695cafb8"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-win32.whl", hash = "sha256:306fe44e754a91cd9d600a6b070c1f2fadbb4a1a257b8781ccf33c7067fd3e4d"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-win_amd64.whl", hash = "sha256:99db65e6f3ab42e06c318f15c98f59a436f1c78179e6a6f40f529c8cc7100b22"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:21b053be28a8a414f2ddd401f1be8361e41032d2ef5884b2f31d31cb723e559f"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:b178e875a7a25b5938b53b006598ee7645172fccafe1c291a706e93f48499ff5"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723a40ee2cc7ea653645bd4cf024326dea2076673fc9d3d33f20f6c81db83e1d"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:295ff8689544f7ee7e819529633d058bd458c1fd7f7e3eebd0f9268ebc56c2a0"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:49496b68cd190a147118af585173ee624114dfb2e0297558c460ad7495f9dfe2"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:acd9b73c5c15f0ec5ce18128b1fe9157ddd0044abc373e6ecd5ba376a7e5d961"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-win32.whl", hash = "sha256:9365a3da32dabd3e69e06b972b1ffb0c89668994c7e8e75ce21d3e5e69ddef28"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-win_amd64.whl", hash = "sha256:8bd63d051f4f313b102a2af1cbc8b80f061bf78f3d5bd0843ff70b5859e27924"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6bab3db192a0c35e3c9d1560eb8332463e29e5507dbd822e29a0a3c48c0a8d92"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:19d98f4f58b13900d8dec4ed09dd09ef292208ee44cc9c2fe01c1f0a2fe440e9"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd33c61513cb1b7371fd40cf221256456d26a56284e7d19d1f0b9f1eb7dd7e8"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d6ba0497c1d066dd004e0f02a92426ca2df20fac08728d03f67f6960271feec"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2b6be53e4fde0065524f1a0a7929b10e9280987b320716c1509478b712a7688c"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:916a798f62f410c0b80b63683c8061f5ebe237b0f4ad778739304253353bc1cb"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-win32.whl", hash = "sha256:31983018b74908ebc6c996a16ad3690301a23befb643093fcfe85efd292e384d"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-win_amd64.whl", hash = "sha256:4363ed245a6231f2e2957cccdda3c776265a75851f4753c60f3004b90e69bfeb"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b8afd5b26570bf41c35c0121801479958b4446751a3971fb9a480c1afd85558e"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c750987fc876813f27b60d619b987b057eb4896b81117f73bb8d9918c14f1cad"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ada0102afff4890f651ed91120c1120065663506b760da4e7823913ebd3258be"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:78c03d0f8a5ab4f3034c0e8482cfcc415a3ec6193491cfa1c643ed707d476f16"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:3bd1cae7519283ff525e64645ebd7a3e0283f3c038f461ecc1c7b040a0c932a1"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-win32.whl", hash = "sha256:01438ebcdc566d58c93af0171c74ec28efe6a29184b773e378a385e6215389da"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-win_amd64.whl", hash = "sha256:4979dc80fbbc9d2ef569e71e0896990bc94df2b9fdbd878290bd129b65ab579c"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c742be912f57586ac43af38b3848f7688863a403dfb220193a882ea60e1ec3a"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:62e23d0ac103bcf1c5555b6c88c114089587bc64d048fef5bbdb58dfd26f96da"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:251f0d1108aab8ea7b9aadbd07fb47fb8e3a5838dde34aa95a3349876b5a1f1d"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ef18a84e5116340e38eca3e7f9eeaaef62738891422e7c2a0b80feab165905f"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3eb6a97a1d39976f360b10ff208c73afb6a4de86dd2a6212ddf65c4a6a2347d5"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0c1c9b673d21477cec17ab10bc4decb1322843ba35b481585facd88203754fc5"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-win32.whl", hash = "sha256:c41a2b9ca80ee555decc605bd3c4520cc6fef9abde8fd66b1cf65126a6922d65"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-win_amd64.whl", hash = "sha256:8a37e4d265033c897892279e8adf505c8b6b4075f2b40d77afb31f7185cd6ecd"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:52fec964fba2ef46476312a03ec8c425956b05c20220a1a03703537824b5e8e1"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:328429aecaba2aee3d71e11f2477c14eec5990fb6d0e884107935f7fb6001632"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85a01b5599e790e76ac3fe3aa2f26e1feba56270023d6afd5550ed63c68552b3"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaf04784797dcdf4c0aa952c8d234fa01974c4729db55c45732520ce12dd95b4"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4488120becf9b71b3ac718f4138269a6be99a42fe023ec457896ba4f80749525"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:14e09e083a5796d513918a66f3d6aedbc131e39e80875afe81d98a03312889e6"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-win32.whl", hash = "sha256:0d322cc9c9b2154ba7e82f7bf25ecc7c36fbe2d82e2933b3642fc095a52cfc78"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-win_amd64.whl", hash = "sha256:7dd8583df2f98dea28b5cd53a1beac963f4f9d087888d75f22fcc93a07cf8d84"}, + {file = "SQLAlchemy-2.0.32-py3-none-any.whl", hash = "sha256:e567a8793a692451f706b363ccf3c45e056b67d90ead58c3bc9471af5d212202"}, + {file = "SQLAlchemy-2.0.32.tar.gz", hash = "sha256:c1b88cc8b02b6a5f0efb0345a03672d4c897dc7d92585176f88c67346f565ea8"}, ] [package.dependencies] @@ -5801,13 +5942,13 @@ files = [ [[package]] name = "sympy" -version = "1.13.1" +version = "1.13.2" description = "Computer algebra system (CAS) in Python" optional = true python-versions = ">=3.8" files = [ - {file = "sympy-1.13.1-py3-none-any.whl", hash = "sha256:db36cdc64bf61b9b24578b6f7bab1ecdd2452cf008f34faa33776680c26d66f8"}, - {file = "sympy-1.13.1.tar.gz", hash = "sha256:9cebf7e04ff162015ce31c9c6c9144daa34a93bd082f54fd8f12deca4f47515f"}, + {file = "sympy-1.13.2-py3-none-any.whl", hash = "sha256:c51d75517712f1aed280d4ce58506a4a88d635d6b5dd48b39102a7ae1f3fcfe9"}, + {file = "sympy-1.13.2.tar.gz", hash = "sha256:401449d84d07be9d0c7a46a64bd54fe097667d5e7181bfe67ec777be9e01cb13"}, ] [package.dependencies] @@ -5816,19 +5957,6 @@ mpmath = ">=1.1.0,<1.4" [package.extras] dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"] -[[package]] -name = "tbb" -version = "2021.13.0" -description = "Intel® oneAPI Threading Building Blocks (oneTBB)" -optional = true -python-versions = "*" -files = [ - {file = "tbb-2021.13.0-py2.py3-none-manylinux1_i686.whl", hash = 
"sha256:a2567725329639519d46d92a2634cf61e76601dac2f777a05686fea546c4fe4f"}, - {file = "tbb-2021.13.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:aaf667e92849adb012b8874d6393282afc318aca4407fc62f912ee30a22da46a"}, - {file = "tbb-2021.13.0-py3-none-win32.whl", hash = "sha256:6669d26703e9943f6164c6407bd4a237a45007e79b8d3832fe6999576eaaa9ef"}, - {file = "tbb-2021.13.0-py3-none-win_amd64.whl", hash = "sha256:3528a53e4bbe64b07a6112b4c5a00ff3c61924ee46c9c68e004a1ac7ad1f09c3"}, -] - [[package]] name = "tenacity" version = "8.5.0" @@ -6024,6 +6152,17 @@ dev = ["tokenizers[testing]"] docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"] +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = true +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + [[package]] name = "tomli" version = "2.0.1" @@ -6037,55 +6176,54 @@ files = [ [[package]] name = "tomlkit" -version = "0.13.0" +version = "0.13.2" description = "Style preserving TOML library" optional = true python-versions = ">=3.8" files = [ - {file = "tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264"}, - {file = "tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, + {file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, + {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, ] [[package]] name = "torch" -version = "2.3.1" +version = "2.4.0" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" optional = true python-versions = ">=3.8.0" files = [ - {file = "torch-2.3.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:605a25b23944be5ab7c3467e843580e1d888b8066e5aaf17ff7bf9cc30001cc3"}, - {file = "torch-2.3.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:f2357eb0965583a0954d6f9ad005bba0091f956aef879822274b1bcdb11bd308"}, - {file = "torch-2.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:32b05fe0d1ada7f69c9f86c14ff69b0ef1957a5a54199bacba63d22d8fab720b"}, - {file = "torch-2.3.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:7c09a94362778428484bcf995f6004b04952106aee0ef45ff0b4bab484f5498d"}, - {file = "torch-2.3.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:b2ec81b61bb094ea4a9dee1cd3f7b76a44555375719ad29f05c0ca8ef596ad39"}, - {file = "torch-2.3.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:490cc3d917d1fe0bd027057dfe9941dc1d6d8e3cae76140f5dd9a7e5bc7130ab"}, - {file = "torch-2.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:5802530783bd465fe66c2df99123c9a54be06da118fbd785a25ab0a88123758a"}, - {file = "torch-2.3.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:a7dd4ed388ad1f3d502bf09453d5fe596c7b121de7e0cfaca1e2017782e9bbac"}, - {file = "torch-2.3.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:a486c0b1976a118805fc7c9641d02df7afbb0c21e6b555d3bb985c9f9601b61a"}, - {file = "torch-2.3.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:224259821fe3e4c6f7edf1528e4fe4ac779c77addaa74215eb0b63a5c474d66c"}, - {file = 
"torch-2.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:e5fdccbf6f1334b2203a61a0e03821d5845f1421defe311dabeae2fc8fbeac2d"}, - {file = "torch-2.3.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:3c333dc2ebc189561514eda06e81df22bf8fb64e2384746b2cb9f04f96d1d4c8"}, - {file = "torch-2.3.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:07e9ba746832b8d069cacb45f312cadd8ad02b81ea527ec9766c0e7404bb3feb"}, - {file = "torch-2.3.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:462d1c07dbf6bb5d9d2f3316fee73a24f3d12cd8dacf681ad46ef6418f7f6626"}, - {file = "torch-2.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:ff60bf7ce3de1d43ad3f6969983f321a31f0a45df3690921720bcad6a8596cc4"}, - {file = "torch-2.3.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:bee0bd33dc58aa8fc8a7527876e9b9a0e812ad08122054a5bff2ce5abf005b10"}, - {file = "torch-2.3.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:aaa872abde9a3d4f91580f6396d54888620f4a0b92e3976a6034759df4b961ad"}, - {file = "torch-2.3.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:3d7a7f7ef21a7520510553dc3938b0c57c116a7daee20736a9e25cbc0e832bdc"}, - {file = "torch-2.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:4777f6cefa0c2b5fa87223c213e7b6f417cf254a45e5829be4ccd1b2a4ee1011"}, - {file = "torch-2.3.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:2bb5af780c55be68fe100feb0528d2edebace1d55cb2e351de735809ba7391eb"}, + {file = "torch-2.4.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:4ed94583e244af51d6a8d28701ca5a9e02d1219e782f5a01dd401f90af17d8ac"}, + {file = "torch-2.4.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:c4ca297b7bd58b506bfd6e78ffd14eb97c0e7797dcd7965df62f50bb575d8954"}, + {file = "torch-2.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:2497cbc7b3c951d69b276ca51fe01c2865db67040ac67f5fc20b03e41d16ea4a"}, + {file = "torch-2.4.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:685418ab93730efbee71528821ff54005596970dd497bf03c89204fb7e3f71de"}, + {file = "torch-2.4.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:e743adadd8c8152bb8373543964551a7cb7cc20ba898dc8f9c0cdbe47c283de0"}, + {file = "torch-2.4.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:7334325c0292cbd5c2eac085f449bf57d3690932eac37027e193ba775703c9e6"}, + {file = "torch-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:97730014da4c57ffacb3c09298c6ce05400606e890bd7a05008d13dd086e46b1"}, + {file = "torch-2.4.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:f169b4ea6dc93b3a33319611fcc47dc1406e4dd539844dcbd2dec4c1b96e166d"}, + {file = "torch-2.4.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:997084a0f9784d2a89095a6dc67c7925e21bf25dea0b3d069b41195016ccfcbb"}, + {file = "torch-2.4.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:bc3988e8b36d1e8b998d143255d9408d8c75da4ab6dd0dcfd23b623dfb0f0f57"}, + {file = "torch-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:3374128bbf7e62cdaed6c237bfd39809fbcfaa576bee91e904706840c3f2195c"}, + {file = "torch-2.4.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:91aaf00bfe1ffa44dc5b52809d9a95129fca10212eca3ac26420eb11727c6288"}, + {file = "torch-2.4.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cc30457ea5489c62747d3306438af00c606b509d78822a88f804202ba63111ed"}, + {file = "torch-2.4.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:a046491aaf96d1215e65e1fa85911ef2ded6d49ea34c8df4d0638879f2402eef"}, + {file = "torch-2.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:688eec9240f3ce775f22e1e1a5ab9894f3d5fe60f3f586deb7dbd23a46a83916"}, + {file = "torch-2.4.0-cp38-none-macosx_11_0_arm64.whl", hash = 
"sha256:3af4de2a618fb065e78404c4ba27a818a7b7957eaeff28c6c66ce7fb504b68b8"}, + {file = "torch-2.4.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:618808d3f610d5f180e47a697d4ec90b810953bb1e020f424b2ac7fb0884b545"}, + {file = "torch-2.4.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:ed765d232d23566052ba83632ec73a4fccde00b4c94ad45d63b471b09d63b7a7"}, + {file = "torch-2.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:a2feb98ac470109472fb10dfef38622a7ee08482a16c357863ebc7bc7db7c8f7"}, + {file = "torch-2.4.0-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:8940fc8b97a4c61fdb5d46a368f21f4a3a562a17879e932eb51a5ec62310cb31"}, ] [package.dependencies] filelock = "*" fsspec = "*" jinja2 = "*" -mkl = {version = ">=2021.1.1,<=2021.4.0", markers = "platform_system == \"Windows\""} networkx = "*" nvidia-cublas-cu12 = {version = "12.1.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-cuda-cupti-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-cuda-nvrtc-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-cuda-runtime-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-cudnn-cu12 = {version = "8.9.2.26", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cudnn-cu12 = {version = "9.1.0.70", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} @@ -6093,22 +6231,22 @@ nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \" nvidia-nccl-cu12 = {version = "2.20.5", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} sympy = "*" -triton = {version = "2.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\""} +triton = {version = "3.0.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.13\""} typing-extensions = ">=4.8.0" [package.extras] opt-einsum = ["opt-einsum (>=3.3)"] -optree = ["optree (>=0.9.1)"] +optree = ["optree (>=0.11.0)"] [[package]] name = "tqdm" -version = "4.66.4" +version = "4.66.5" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.4-py3-none-any.whl", hash = "sha256:b75ca56b413b030bc3f00af51fd2c1a1a5eac6a0c1cca83cbb37a5c52abce644"}, - {file = "tqdm-4.66.4.tar.gz", hash = "sha256:e4d936c9de8727928f3be6079590e97d9abfe8d39a590be678eb5919ffc186bb"}, + {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"}, + {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"}, ] [package.dependencies] @@ -6122,19 +6260,19 @@ telegram = ["requests"] [[package]] name = "trafilatura" -version = "1.11.0" +version = "1.12.0" description = "Python package and command-line tool designed to gather text on 
the Web, includes all necessary discovery and text processing components to perform web crawling, downloads, scraping, and extraction of main texts, metadata and comments." optional = true python-versions = ">=3.6" files = [ - {file = "trafilatura-1.11.0-py3-none-any.whl", hash = "sha256:20f016be873a2cf3e02b9798f9537d09808559fcc667d42e1c019560ca45dce7"}, - {file = "trafilatura-1.11.0.tar.gz", hash = "sha256:9334ca101c40b2904af5afcee790f0374fabca3ac388811720be65cc768787a2"}, + {file = "trafilatura-1.12.0-py3-none-any.whl", hash = "sha256:544c442184db4e0a85c4dcede8b20f0d6d9202477a12faeddeb8c7c5fc5e13ca"}, + {file = "trafilatura-1.12.0.tar.gz", hash = "sha256:17d2074ecfe2c562bf0863de7e839fad14cc66d5f98090741eaa918eabfbf9d5"}, ] [package.dependencies] certifi = "*" charset-normalizer = {version = ">=3.2.0", markers = "python_version >= \"3.7\""} -courlan = ">=1.1.0" +courlan = ">=1.2.0" htmldate = ">=1.8.1" justext = ">=3.0.1" lxml = {version = ">=5.2.2", markers = "platform_system != \"Darwin\" or python_version > \"3.8\""} @@ -6146,19 +6284,19 @@ gui = ["Gooey (>=1.0.1)"] [[package]] name = "transformers" -version = "4.41.2" +version = "4.44.0" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = true python-versions = ">=3.8.0" files = [ - {file = "transformers-4.41.2-py3-none-any.whl", hash = "sha256:05555d20e43f808de1ef211ab64803cdb513170cef70d29a888b589caebefc67"}, - {file = "transformers-4.41.2.tar.gz", hash = "sha256:80a4db216533d573e9cc7388646c31ed9480918feb7c55eb211249cb23567f87"}, + {file = "transformers-4.44.0-py3-none-any.whl", hash = "sha256:ea0ff72def71e9f4812d9414d4803b22681b1617aa6f511bd51cfff2b44a6fca"}, + {file = "transformers-4.44.0.tar.gz", hash = "sha256:75699495e30b7635ca444d8d372e138c687ab51a875b387e33f1fb759c37f196"}, ] [package.dependencies] accelerate = {version = ">=0.21.0", optional = true, markers = "extra == \"torch\""} filelock = "*" -huggingface-hub = ">=0.23.0,<1.0" +huggingface-hub = ">=0.23.2,<1.0" numpy = ">=1.17" packaging = ">=20.0" pyyaml = ">=5.1" @@ -6172,14 +6310,15 @@ tqdm = ">=4.27" [package.extras] accelerate = ["accelerate (>=0.21.0)"] agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch"] -all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision"] +all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision"] audio = ["kenlm", "librosa", "phonemizer", 
"pyctcdecode (>=0.4.0)"] +benchmark = ["optimum-benchmark (>=0.2.0)"] codecarbon = ["codecarbon (==1.2.0)"] deepspeed = ["accelerate (>=0.21.0)", "deepspeed (>=0.9.3)"] -deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.21.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] -dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] -dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.19,<0.20)", "urllib3 (<2.0.0)"] -dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece 
(>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.21.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.19,<0.20)", "urllib3 (<2.0.0)"] +dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score 
(!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)", "scipy (<1.13.0)"] flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] ftfy = ["ftfy"] @@ -6190,41 +6329,46 @@ natten = ["natten (>=0.14.6,<0.15.0)"] onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] optuna = ["optuna"] -quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "isort (>=5.5.4)", "ruff (==0.1.5)", "urllib3 (<2.0.0)"] +quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "isort (>=5.5.4)", "ruff (==0.5.1)", "urllib3 (<2.0.0)"] ray = ["ray[tune] (>=2.7.0)"] retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] +ruff = ["ruff (==0.5.1)"] sagemaker = ["sagemaker (>=2.31.0)"] sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"] serving = ["fastapi", "pydantic", "starlette", "uvicorn"] sigopt = ["sigopt"] sklearn = ["scikit-learn"] speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] -testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "parameterized", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] -tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] -tf-cpu = ["keras (>2.9,<2.16)", "keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>2.9,<2.16)", "tensorflow-probability (<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] +testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "parameterized", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +tf = ["keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-cpu = ["keras (>2.9,<2.16)", "keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow-cpu (>2.9,<2.16)", "tensorflow-probability (<0.24)", "tensorflow-text (<2.16)", "tf2onnx"] tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] -timm = ["timm"] +timm = ["timm (<=0.9.16)"] tokenizers = ["tokenizers (>=0.19,<0.20)"] torch = ["accelerate (>=0.21.0)", "torch"] torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] torch-vision = ["Pillow (>=10.0.1,<=15.0)", "torchvision"] -torchhub = ["filelock", "huggingface-hub (>=0.23.0,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex 
(!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.19,<0.20)", "torch", "tqdm (>=4.27)"] +torchhub = ["filelock", "huggingface-hub (>=0.23.2,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.19,<0.20)", "torch", "tqdm (>=4.27)"] video = ["av (==9.2.0)", "decord (==0.6.0)"] vision = ["Pillow (>=10.0.1,<=15.0)"] [[package]] name = "triton" -version = "2.3.1" +version = "3.0.0" description = "A language and compiler for custom Deep Learning operations" optional = true python-versions = "*" files = [ - {file = "triton-2.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c84595cbe5e546b1b290d2a58b1494df5a2ef066dd890655e5b8a8a92205c33"}, - {file = "triton-2.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9d64ae33bcb3a7a18081e3a746e8cf87ca8623ca13d2c362413ce7a486f893e"}, - {file = "triton-2.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaf80e8761a9e3498aa92e7bf83a085b31959c61f5e8ac14eedd018df6fccd10"}, - {file = "triton-2.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b13bf35a2b659af7159bf78e92798dc62d877aa991de723937329e2d382f1991"}, - {file = "triton-2.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63381e35ded3304704ea867ffde3b7cfc42c16a55b3062d41e017ef510433d66"}, - {file = "triton-2.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d968264523c7a07911c8fb51b4e0d1b920204dae71491b1fe7b01b62a31e124"}, + {file = "triton-3.0.0-1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e1efef76935b2febc365bfadf74bcb65a6f959a9872e5bddf44cc9e0adce1e1a"}, + {file = "triton-3.0.0-1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5ce8520437c602fb633f1324cc3871c47bee3b67acf9756c1a66309b60e3216c"}, + {file = "triton-3.0.0-1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:34e509deb77f1c067d8640725ef00c5cbfcb2052a1a3cb6a6d343841f92624eb"}, + {file = "triton-3.0.0-1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bcbf3b1c48af6a28011a5c40a5b3b9b5330530c3827716b5fbf6d7adcc1e53e9"}, + {file = "triton-3.0.0-1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6e5727202f7078c56f91ff13ad0c1abab14a0e7f2c87e91b12b6f64f3e8ae609"}, + {file = "triton-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39b052da883351fdf6be3d93cedae6db3b8e3988d3b09ed221bccecfa9612230"}, + {file = "triton-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd34f19a8582af96e6291d4afce25dac08cb2a5d218c599163761e8e0827208e"}, + {file = "triton-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d5e10de8c011adeb7c878c6ce0dd6073b14367749e34467f1cff2bde1b78253"}, + {file = "triton-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8903767951bf86ec960b4fe4e21bc970055afc65e9d57e916d79ae3c93665e3"}, + {file = "triton-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41004fb1ae9a53fcb3e970745feb87f0e3c94c6ce1ba86e95fa3b8537894bef7"}, ] [package.dependencies] @@ -6232,8 +6376,8 @@ filelock = "*" [package.extras] build = ["cmake (>=3.20)", "lit"] -tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)", "torch"] -tutorials = 
["matplotlib", "pandas", "tabulate", "torch"] +tests = ["autopep8", "flake8", "isort", "llnl-hatchet", "numpy", "pytest", "scipy (>=1.7.1)"] +tutorials = ["matplotlib", "pandas", "tabulate"] [[package]] name = "twine" @@ -6317,21 +6461,21 @@ files = [ [[package]] name = "typos" -version = "1.23.3" +version = "1.23.6" description = "Source Code Spelling Correction" optional = false python-versions = ">=3.7" files = [ - {file = "typos-1.23.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:64eb857fd5d9fbceddf84f14dcb102355e95f79e61639c96b76a81f6526fe589"}, - {file = "typos-1.23.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:3413933c235362f3a9dc99ea02acfbe096ac451f0828cf8e0617e9ea859add31"}, - {file = "typos-1.23.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5633d7baccc9ee5f3c0a2675c3eed6369983845994d6582f70bb2dd30b55bd64"}, - {file = "typos-1.23.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07cb89e916ed3114ac5980e4df4cca936d4305f13944fa4821afdf319b24b1e2"}, - {file = "typos-1.23.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f3887869fe4a16f203a749b97d32dac763d3b8bf2a71728d20726e8f1aee33e"}, - {file = "typos-1.23.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:eb2bf8c347e82687fa97f5208ded28377fcf4c0cdd685f5323b22cb756bd70d3"}, - {file = "typos-1.23.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:4286ca8b7cda18e83e7e8be6aa2b37306b75138a7b0cbf3384f983a5772c0cdb"}, - {file = "typos-1.23.3-py3-none-win32.whl", hash = "sha256:a0bd7a04f476126cb7a2e4d5c11c1083d1d68657e3349817cdb757ce110cf265"}, - {file = "typos-1.23.3-py3-none-win_amd64.whl", hash = "sha256:372139f46e57f18943e2b83d12dc9f4533a93618f0aac8d59684479628e5e576"}, - {file = "typos-1.23.3.tar.gz", hash = "sha256:2bcbfc32660170b2b4797a5613c4ce269fba2eef30d47d0a08ea0be57ddd3b99"}, + {file = "typos-1.23.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:9209947ab1e815bcb8cb781fc73fd6ad88eacdea7b1c15e73ca49217fa7c44e7"}, + {file = "typos-1.23.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:b049bfce407d7d61c5be4955d2fae6db644dc5d56ca236224cae0c3978024a75"}, + {file = "typos-1.23.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0b17e19c5e6b4f46acf0f60d053e0c188d31c09748f487f171465623f5f3380"}, + {file = "typos-1.23.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b609d525078b222cf8e25bd8e5cd60a56a542129d7bccb4f6cc992f686410331"}, + {file = "typos-1.23.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4fbf955dc4a09a95d3358f8edb10c1418e45bf07a6c9c414432320009a74dd5f"}, + {file = "typos-1.23.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c686b06039b7fd95eed661cd2093fa7f048c76cb40b6bad55827a68aa707240a"}, + {file = "typos-1.23.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:0fda8c8502bce101277eb0a4b4d04847fc7018e2f9cff6d2fc86b3fdec239755"}, + {file = "typos-1.23.6-py3-none-win32.whl", hash = "sha256:8edaba24813be7ef678868e8ed49c48eb70cf128afc41ae86cc2127fb32e326b"}, + {file = "typos-1.23.6-py3-none-win_amd64.whl", hash = "sha256:d47b7d0e08975adf67873a8e43dc09fc1b6ff655a4241497348808ee54442668"}, + {file = "typos-1.23.6.tar.gz", hash = "sha256:2691988d2a15cde2cdd4f2fa5fd32880765b2a68ed6ccd48d6dc693c44447bcf"}, ] [[package]] @@ -6389,6 +6533,17 @@ brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotl secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] socks = 
["PySocks (>=1.5.6,!=1.5.7,<2.0)"] +[[package]] +name = "uuid6" +version = "2024.1.12" +description = "New time-based UUID formats which are suited for use as a database key" +optional = true +python-versions = ">=3.8" +files = [ + {file = "uuid6-2024.1.12-py3-none-any.whl", hash = "sha256:8150093c8d05a331bc0535bc5ef6cf57ac6eceb2404fd319bc10caee2e02c065"}, + {file = "uuid6-2024.1.12.tar.gz", hash = "sha256:ed0afb3a973057575f9883201baefe402787ca5e11e1d24e377190f0c43f1993"}, +] + [[package]] name = "virtualenv" version = "20.26.3" @@ -6429,43 +6584,46 @@ tenacity = ">=8.0.1" [[package]] name = "watchdog" -version = "4.0.1" +version = "4.0.2" description = "Filesystem events monitoring" optional = false python-versions = ">=3.8" files = [ - {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:da2dfdaa8006eb6a71051795856bedd97e5b03e57da96f98e375682c48850645"}, - {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e93f451f2dfa433d97765ca2634628b789b49ba8b504fdde5837cdcf25fdb53b"}, - {file = "watchdog-4.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ef0107bbb6a55f5be727cfc2ef945d5676b97bffb8425650dadbb184be9f9a2b"}, - {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17e32f147d8bf9657e0922c0940bcde863b894cd871dbb694beb6704cfbd2fb5"}, - {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03e70d2df2258fb6cb0e95bbdbe06c16e608af94a3ffbd2b90c3f1e83eb10767"}, - {file = "watchdog-4.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:123587af84260c991dc5f62a6e7ef3d1c57dfddc99faacee508c71d287248459"}, - {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:093b23e6906a8b97051191a4a0c73a77ecc958121d42346274c6af6520dec175"}, - {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:611be3904f9843f0529c35a3ff3fd617449463cb4b73b1633950b3d97fa4bfb7"}, - {file = "watchdog-4.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62c613ad689ddcb11707f030e722fa929f322ef7e4f18f5335d2b73c61a85c28"}, - {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d4925e4bf7b9bddd1c3de13c9b8a2cdb89a468f640e66fbfabaf735bd85b3e35"}, - {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cad0bbd66cd59fc474b4a4376bc5ac3fc698723510cbb64091c2a793b18654db"}, - {file = "watchdog-4.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a3c2c317a8fb53e5b3d25790553796105501a235343f5d2bf23bb8649c2c8709"}, - {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c9904904b6564d4ee8a1ed820db76185a3c96e05560c776c79a6ce5ab71888ba"}, - {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:667f3c579e813fcbad1b784db7a1aaa96524bed53437e119f6a2f5de4db04235"}, - {file = "watchdog-4.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d10a681c9a1d5a77e75c48a3b8e1a9f2ae2928eda463e8d33660437705659682"}, - {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0144c0ea9997b92615af1d94afc0c217e07ce2c14912c7b1a5731776329fcfc7"}, - {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:998d2be6976a0ee3a81fb8e2777900c28641fb5bfbd0c84717d89bca0addcdc5"}, - {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e7921319fe4430b11278d924ef66d4daa469fafb1da679a2e48c935fa27af193"}, - {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f0de0f284248ab40188f23380b03b59126d1479cd59940f2a34f8852db710625"}, - {file = 
"watchdog-4.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bca36be5707e81b9e6ce3208d92d95540d4ca244c006b61511753583c81c70dd"}, - {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ab998f567ebdf6b1da7dc1e5accfaa7c6992244629c0fdaef062f43249bd8dee"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dddba7ca1c807045323b6af4ff80f5ddc4d654c8bce8317dde1bd96b128ed253"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_armv7l.whl", hash = "sha256:4513ec234c68b14d4161440e07f995f231be21a09329051e67a2118a7a612d2d"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_i686.whl", hash = "sha256:4107ac5ab936a63952dea2a46a734a23230aa2f6f9db1291bf171dac3ebd53c6"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64.whl", hash = "sha256:6e8c70d2cd745daec2a08734d9f63092b793ad97612470a0ee4cbb8f5f705c57"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f27279d060e2ab24c0aa98363ff906d2386aa6c4dc2f1a374655d4e02a6c5e5e"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_s390x.whl", hash = "sha256:f8affdf3c0f0466e69f5b3917cdd042f89c8c63aebdb9f7c078996f607cdb0f5"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ac7041b385f04c047fcc2951dc001671dee1b7e0615cde772e84b01fbf68ee84"}, - {file = "watchdog-4.0.1-py3-none-win32.whl", hash = "sha256:206afc3d964f9a233e6ad34618ec60b9837d0582b500b63687e34011e15bb429"}, - {file = "watchdog-4.0.1-py3-none-win_amd64.whl", hash = "sha256:7577b3c43e5909623149f76b099ac49a1a01ca4e167d1785c76eb52fa585745a"}, - {file = "watchdog-4.0.1-py3-none-win_ia64.whl", hash = "sha256:d7b9f5f3299e8dd230880b6c55504a1f69cf1e4316275d1b215ebdd8187ec88d"}, - {file = "watchdog-4.0.1.tar.gz", hash = "sha256:eebaacf674fa25511e8867028d281e602ee6500045b57f43b08778082f7f8b44"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ede7f010f2239b97cc79e6cb3c249e72962404ae3865860855d5cbe708b0fd22"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a2cffa171445b0efa0726c561eca9a27d00a1f2b83846dbd5a4f639c4f8ca8e1"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c50f148b31b03fbadd6d0b5980e38b558046b127dc483e5e4505fcef250f9503"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7c7d4bf585ad501c5f6c980e7be9c4f15604c7cc150e942d82083b31a7548930"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:914285126ad0b6eb2258bbbcb7b288d9dfd655ae88fa28945be05a7b475a800b"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:984306dc4720da5498b16fc037b36ac443816125a3705dfde4fd90652d8028ef"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1cdcfd8142f604630deef34722d695fb455d04ab7cfe9963055df1fc69e6727a"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7ab624ff2f663f98cd03c8b7eedc09375a911794dfea6bf2a359fcc266bff29"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:132937547a716027bd5714383dfc40dc66c26769f1ce8a72a859d6a48f371f3a"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:cd67c7df93eb58f360c43802acc945fa8da70c675b6fa37a241e17ca698ca49b"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcfd02377be80ef3b6bc4ce481ef3959640458d6feaae0bd43dd90a43da90a7d"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:980b71510f59c884d684b3663d46e7a14b457c9611c481e5cef08f4dd022eed7"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:aa160781cafff2719b663c8a506156e9289d111d80f3387cf3af49cedee1f040"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f6ee8dedd255087bc7fe82adf046f0b75479b989185fb0bdf9a98b612170eac7"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0b4359067d30d5b864e09c8597b112fe0a0a59321a0f331498b013fb097406b4"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:770eef5372f146997638d737c9a3c597a3b41037cfbc5c41538fc27c09c3a3f9"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eeea812f38536a0aa859972d50c76e37f4456474b02bd93674d1947cf1e39578"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b2c45f6e1e57ebb4687690c05bc3a2c1fb6ab260550c4290b8abb1335e0fd08b"}, + {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:10b6683df70d340ac3279eff0b2766813f00f35a1d37515d2c99959ada8f05fa"}, + {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:f7c739888c20f99824f7aa9d31ac8a97353e22d0c0e54703a547a218f6637eb3"}, + {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c100d09ac72a8a08ddbf0629ddfa0b8ee41740f9051429baa8e31bb903ad7508"}, + {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f5315a8c8dd6dd9425b974515081fc0aadca1d1d61e078d2246509fd756141ee"}, + {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:2d468028a77b42cc685ed694a7a550a8d1771bb05193ba7b24006b8241a571a1"}, + {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f15edcae3830ff20e55d1f4e743e92970c847bcddc8b7509bcd172aa04de506e"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:936acba76d636f70db8f3c66e76aa6cb5136a936fc2a5088b9ce1c7a3508fc83"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_armv7l.whl", hash = "sha256:e252f8ca942a870f38cf785aef420285431311652d871409a64e2a0a52a2174c"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_i686.whl", hash = "sha256:0e83619a2d5d436a7e58a1aea957a3c1ccbf9782c43c0b4fed80580e5e4acd1a"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64.whl", hash = "sha256:88456d65f207b39f1981bf772e473799fcdc10801062c36fd5ad9f9d1d463a73"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:32be97f3b75693a93c683787a87a0dc8db98bb84701539954eef991fb35f5fbc"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_s390x.whl", hash = "sha256:c82253cfc9be68e3e49282831afad2c1f6593af80c0daf1287f6a92657986757"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c0b14488bd336c5b1845cee83d3e631a1f8b4e9c5091ec539406e4a324f882d8"}, + {file = "watchdog-4.0.2-py3-none-win32.whl", hash = "sha256:0d8a7e523ef03757a5aa29f591437d64d0d894635f8a50f370fe37f913ce4e19"}, + {file = "watchdog-4.0.2-py3-none-win_amd64.whl", hash = "sha256:c344453ef3bf875a535b0488e3ad28e341adbd5a9ffb0f7d62cefacc8824ef2b"}, + {file = "watchdog-4.0.2-py3-none-win_ia64.whl", hash = "sha256:baececaa8edff42cd16558a639a9b0ddf425f93d892e8392a56bf904f5eff22c"}, + {file = "watchdog-4.0.2.tar.gz", hash = "sha256:b4dfbb6c49221be4535623ea4474a4d6ee0a9cef4a80b20c28db4d858b64e270"}, ] [package.extras] @@ -6764,13 +6922,13 @@ multidict = ">=4.0" [[package]] name = "zipp" -version = "3.19.2" +version = "3.20.0" description = "Backport of 
pathlib-compatible object wrapper for zip files"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "zipp-3.19.2-py3-none-any.whl", hash = "sha256:f091755f667055f2d02b32c53771a7a6c8b47e1fdbc4b72a8b9072b3eef8015c"},
-    {file = "zipp-3.19.2.tar.gz", hash = "sha256:bf1dcf6450f873a13e952a29504887c89e6de7506209e5b1bcc3460135d4de19"},
+    {file = "zipp-3.20.0-py3-none-any.whl", hash = "sha256:58da6168be89f0be59beb194da1250516fdaa062ccebd30127ac65d30045e10d"},
+    {file = "zipp-3.20.0.tar.gz", hash = "sha256:0145e43d89664cfe1a2e533adc75adafed82fe2da404b4bbb6b026c0157bdb31"},
 ]

 [package.extras]
@@ -6778,7 +6936,7 @@ doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linke
 test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"]

 [extras]
-all = ["accelerate", "anthropic", "beautifulsoup4", "boto3", "cohere", "diffusers", "duckduckgo-search", "elevenlabs", "filetype", "google-generativeai", "mail-parser", "markdownify", "marqo", "ollama", "opensearch-py", "opentelemetry-api", "opentelemetry-exporter-otlp-proto-http", "opentelemetry-instrumentation", "opentelemetry-instrumentation-threading", "opentelemetry-sdk", "pandas", "pgvector", "pillow", "pinecone-client", "playwright", "psycopg2-binary", "pusher", "pymongo", "pypdf", "qdrant-client", "redis", "sentencepiece", "snowflake-sqlalchemy", "sqlalchemy", "torch", "trafilatura", "transformers", "voyageai"]
+all = ["accelerate", "anthropic", "astrapy", "beautifulsoup4", "boto3", "cohere", "diffusers", "duckduckgo-search", "elevenlabs", "filetype", "google-generativeai", "mail-parser", "markdownify", "marqo", "ollama", "opensearch-py", "opentelemetry-api", "opentelemetry-exporter-otlp-proto-http", "opentelemetry-instrumentation", "opentelemetry-instrumentation-threading", "opentelemetry-sdk", "pandas", "pgvector", "pillow", "pinecone-client", "playwright", "psycopg2-binary", "pusher", "pymongo", "pypdf", "qdrant-client", "redis", "sentencepiece", "snowflake-sqlalchemy", "sqlalchemy", "torch", "trafilatura", "transformers", "voyageai"]
 drivers-embedding-amazon-bedrock = ["boto3"]
 drivers-embedding-amazon-sagemaker = ["boto3"]
 drivers-embedding-cohere = ["cohere"]
@@ -6809,6 +6967,7 @@ drivers-sql-amazon-redshift = ["boto3"]
 drivers-sql-snowflake = ["snowflake-sqlalchemy", "sqlalchemy"]
 drivers-text-to-speech-elevenlabs = ["elevenlabs"]
 drivers-vector-amazon-opensearch = ["boto3", "opensearch-py"]
+drivers-vector-astra-db = ["astrapy"]
 drivers-vector-marqo = ["marqo"]
 drivers-vector-mongodb = ["pymongo"]
 drivers-vector-opensearch = ["opensearch-py"]
@@ -6829,4 +6988,4 @@ loaders-sql = ["sqlalchemy"]
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.9"
-content-hash = "7c5e3b0815882872c8476708275f77205cba2f6f468be65465edbfbf99d8671d"
+content-hash = "ee23a885217a5285e3a33cac221c55f011cd4ce428b33cd8abfbdac38a27a638"
diff --git a/pyproject.toml b/pyproject.toml
index 65b7f6340..6c50013ad 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "griptape"
-version = "0.29.2"
+version = "0.30.0"
 description = "Modular Python framework for LLM workflows, tools, memory, and data."
 authors = ["Griptape "]
 license = "Apache 2.0"
@@ -50,6 +50,7 @@ markdownify = {version = "^0.11.6", optional = true}
 voyageai = {version = "^0.2.1", optional = true}
 elevenlabs = {version = "^1.1.2", optional = true}
 qdrant-client = { version = "^1.10.1", optional = true }
+astrapy = { version = "^1.4", optional = true }
 pusher = {version = "^3.3.2", optional = true}
 ollama = {version = "^0.3.0", optional = true}
 duckduckgo-search = {version = "^6.1.12", optional = true}
@@ -96,6 +97,7 @@ drivers-vector-opensearch = ["opensearch-py"]
 drivers-vector-amazon-opensearch = ["opensearch-py", "boto3"]
 drivers-vector-pgvector = ["sqlalchemy", "pgvector", "psycopg2-binary"]
 drivers-vector-qdrant = ["qdrant-client"]
+drivers-vector-astra-db = ["astrapy"]

 drivers-embedding-amazon-bedrock = ["boto3"]
 drivers-embedding-amazon-sagemaker = ["boto3"]
@@ -168,6 +170,7 @@ all = [
     "marqo",
     "pinecone-client",
     "qdrant-client",
+    "astrapy",
     "pymongo",
     "redis",
     "opensearch-py",
@@ -222,8 +225,8 @@ pytest-clarity = "^1.0.1"
 optional = true

 [tool.poetry.group.dev.dependencies]
-ruff = "^0.4.6"
-pyright = "^1.1.363"
+ruff = "^0.6.0"
+pyright = "^1.1.376"
 pre-commit = "^3.7.1"
 boto3-stubs = {extras = ["bedrock", "iam", "opensearch", "s3", "sagemaker"], version = "^1.34.105"}
 typos = "^1.22.9"
@@ -236,7 +239,7 @@ optional = true
 mkdocs = "^1.5.2"
 mkdocs-material = "^9.2.8"
 mkdocs-glightbox = "^0.3.4"
-mkdocstrings = {extras = ["python"], version = "^0.23.0"}
+mkdocstrings = {extras = ["python"], version = "^0.25.2"}
 mkdocs-gen-files = "^0.5.0"
 mkdocs-literate-nav = "^0.6.0"
 mkdocs-section-index = "^0.3.6"
@@ -268,6 +271,7 @@ select = [
     "G", # flake8-logging-format
     "T20", # flake8-print
     "PT", # flake8-pytest-style
+    "RET", # flake8-return
     "SIM", # flake8-simplify
     "TID", # flake8-tidy-imports
     "TCH", # flake8-type-checking
@@ -296,11 +300,14 @@ ignore = [
     "ANN102", # missing-type-cls
     "ANN401", # any-type
     "PT011", # pytest-raises-too-broad
+    "RET505" # superfluous-else-return
 ]
-preview = true
 [tool.ruff.lint.pydocstyle]
 convention = "google"

+[tool.ruff.lint.flake8-pytest-style]
+fixture-parentheses = true
+
 [tool.ruff.lint.per-file-ignores]
 "__init__.py" = [
     "I" # isort
@@ -310,6 +317,9 @@ convention = "google"
     "ANN201", # missing-return-type-undocumented-public-function
     "ANN202", # missing-return-type-private-function
 ]
+"docs/*" = [
+    "T20" # flake8-print
+]

 [tool.ruff.lint.flake8-tidy-imports.banned-api]
 "attr".msg = "The attr module is deprecated, use attrs instead."
diff --git a/tests/integration/drivers/vector/test_astra_db_vector_store_driver.py b/tests/integration/drivers/vector/test_astra_db_vector_store_driver.py new file mode 100644 index 000000000..caa89144c --- /dev/null +++ b/tests/integration/drivers/vector/test_astra_db_vector_store_driver.py @@ -0,0 +1,148 @@ +from __future__ import annotations + +import json +import math +import os + +import pytest + +from griptape.drivers import AstraDbVectorStoreDriver, BaseVectorStoreDriver +from tests.mocks.mock_embedding_driver import MockEmbeddingDriver + +TEST_COLLECTION_NAME = "gt_int_test" +TEST_COLLECTION_NAME_METRIC = "gt_int_test_dot" + + +class TestAstraDbVectorStoreDriver: + @pytest.fixture() + def embedding_driver(self): + def circle_fraction_string_to_vector(chunk: str) -> list[float]: + try: + fraction = float(json.loads(chunk)) + angle = fraction * math.pi * 2 + return [math.cos(angle), math.sin(angle)] + except Exception: + return [0.0, 0.0] + + return MockEmbeddingDriver(mock_output=circle_fraction_string_to_vector) + + @pytest.fixture() + def vector_store_collection(self): + import astrapy + + database = astrapy.DataAPIClient().get_database( + api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"], + token=os.environ["ASTRA_DB_APPLICATION_TOKEN"], + namespace=os.environ.get("ASTRA_DB_KEYSPACE"), + ) + collection = database.create_collection( + name=TEST_COLLECTION_NAME, + dimension=2, + metric="cosine", + ) + yield collection + collection.drop() + + @pytest.fixture() + def vector_store_driver(self, embedding_driver, vector_store_collection): + return AstraDbVectorStoreDriver( + api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"], + token=os.environ["ASTRA_DB_APPLICATION_TOKEN"], + collection_name=vector_store_collection.name, + astra_db_namespace=os.environ.get("ASTRA_DB_KEYSPACE"), + embedding_driver=embedding_driver, + ) + + def test_vector_crud(self, vector_store_driver, vector_store_collection, embedding_driver): + """Test basic vector CRUD, various call patterns.""" + vector_store_collection.delete_many({}) + + vec1 = embedding_driver.embed_string("0.012") + id1 = vector_store_driver.upsert_vector(vec1, vector_id="v1") + + vec2 = embedding_driver.embed_string("0.024") + id2 = vector_store_driver.upsert_vector(vec2, vector_id="v2", namespace="ns") + + vec3 = embedding_driver.embed_string("0.036") + id3 = vector_store_driver.upsert_vector(vec3) + + vec4 = embedding_driver.embed_string("0.048") + id4 = vector_store_driver.upsert_vector(vec4, vector_id="v4", meta={"i": 4}, namespace="ns") + + assert id1 == "v1" + assert id2 == "v2" + assert isinstance(id3, str) + assert id4 == "v4" + + # retrieve by id + e1 = vector_store_driver.load_entry(id1) + e1_n = vector_store_driver.load_entry(id1, namespace="false_ns") + e2 = vector_store_driver.load_entry(id2, namespace="ns") + e3 = vector_store_driver.load_entry(id3) + e4 = vector_store_driver.load_entry(id4) + assert e1 == BaseVectorStoreDriver.Entry( + id=id1, + vector=vec1, + ) + assert e1_n is None + assert e2 == BaseVectorStoreDriver.Entry( + id=id2, + vector=vec2, + namespace="ns", + ) + assert e3 == BaseVectorStoreDriver.Entry( + id=id3, + vector=vec3, + ) + assert e4 == BaseVectorStoreDriver.Entry( + id=id4, + vector=vec4, + meta={"i": 4}, + namespace="ns", + ) + + # retrieve multiple entries + es_ns = vector_store_driver.load_entries(namespace="ns") + es_all = vector_store_driver.load_entries() + assert len(es_ns) == 2 + assert any(e == e2 for e in es_ns) + assert any(e == e4 for e in es_ns) + assert len(es_all) == 4 + assert any(e == e1 
for e in es_all) + assert any(e == e2 for e in es_all) + assert any(e == e3 for e in es_all) + assert any(e == e4 for e in es_all) + + # delete and recheck + vector_store_driver.delete_vector("fake_id") + vector_store_driver.delete_vector(id4) + es_ns_postdel = vector_store_driver.load_entries(namespace="ns") + assert len(es_ns_postdel) == 1 + assert es_ns_postdel[0] == e2 + + # queries + query_2 = vector_store_driver.query("0.060", count=2, include_vectors=True) + query_all = vector_store_driver.query("0.060", include_vectors=True) + query_2_novectors = vector_store_driver.query("0.060", count=2) + query_all_ns = vector_store_driver.query("0.060", include_vectors=True, namespace="ns") + # + d_query_2 = [self._descore_entry(ent) for ent in query_2] + assert d_query_2 == [e3, e2] + + d_query_all = [self._descore_entry(ent) for ent in query_all] + assert d_query_all == [e3, e2, e1] + d_query_2_novectors = [self._descore_entry(ent) for ent in query_2_novectors] + assert d_query_2_novectors == [ + BaseVectorStoreDriver.Entry( + id=id3, + ), + BaseVectorStoreDriver.Entry( + id=id2, + namespace="ns", + ), + ] + d_query_all_ns = [self._descore_entry(ent) for ent in query_all_ns] + assert d_query_all_ns == [e2] + + def _descore_entry(self, entry: BaseVectorStoreDriver.Entry) -> BaseVectorStoreDriver.Entry: + return BaseVectorStoreDriver.Entry.from_dict({k: v for k, v in entry.__dict__.items() if k != "score"}) diff --git a/tests/integration/tasks/test_tool_task.py b/tests/integration/tasks/test_tool_task.py index aee0af110..426dde995 100644 --- a/tests/integration/tasks/test_tool_task.py +++ b/tests/integration/tasks/test_tool_task.py @@ -10,10 +10,10 @@ class TestToolTask: def structure_tester(self, request): from griptape.structures import Agent from griptape.tasks import ToolTask - from griptape.tools import Calculator + from griptape.tools import CalculatorTool return StructureTester( - Agent(tasks=[ToolTask(tool=Calculator())], conversation_memory=None, prompt_driver=request.param) + Agent(tasks=[ToolTask(tool=CalculatorTool())], conversation_memory=None, prompt_driver=request.param) ) def test_tool_task(self, structure_tester): diff --git a/tests/integration/tasks/test_toolkit_task.py b/tests/integration/tasks/test_toolkit_task.py index 8dfcfdc73..50b4f2a97 100644 --- a/tests/integration/tasks/test_toolkit_task.py +++ b/tests/integration/tasks/test_toolkit_task.py @@ -14,18 +14,18 @@ def structure_tester(self, request): from griptape.drivers import GoogleWebSearchDriver from griptape.structures import Agent - from griptape.tools import TaskMemoryClient, WebScraper, WebSearch + from griptape.tools import PromptSummaryTool, WebScraperTool, WebSearchTool return StructureTester( Agent( tools=[ - WebSearch( + WebSearchTool( web_search_driver=GoogleWebSearchDriver( api_key=os.environ["GOOGLE_API_KEY"], search_id=os.environ["GOOGLE_API_SEARCH_ID"] ) ), - WebScraper(off_prompt=True), - TaskMemoryClient(off_prompt=False), + WebScraperTool(off_prompt=True), + PromptSummaryTool(off_prompt=False), ], conversation_memory=None, prompt_driver=request.param, diff --git a/tests/integration/test_code_blocks.py b/tests/integration/test_code_blocks.py index 2da683a2a..c9c666b0e 100644 --- a/tests/integration/test_code_blocks.py +++ b/tests/integration/test_code_blocks.py @@ -1,22 +1,47 @@ -import io import os +import subprocess import pytest -from tests.utils.code_blocks import check_py_string, get_all_code_blocks +SKIP_FILES = [ + "docs/griptape-tools/official-tools/src/computer_tool_1.py", + 
"docs/examples/src/load_query_and_chat_marqo_1.py", + "docs/griptape-framework/drivers/src/embedding_drivers_2.py", + "docs/griptape-framework/drivers/src/embedding_drivers_6.py", + "docs/griptape-framework/drivers/src/embedding_drivers_7.py", + "docs/griptape-framework/drivers/src/image_generation_drivers_7.py", + "docs/griptape-framework/drivers/src/image_generation_drivers_8.py", + "docs/griptape-framework/drivers/src/image_generation_drivers_9.py", + "docs/griptape-framework/drivers/src/prompt_drivers_4.py", + "docs/griptape-framework/drivers/src/prompt_drivers_12.py", + "docs/griptape-framework/drivers/src/prompt_drivers_14.py", + "docs/griptape-framework/drivers/src/observability_drivers_1.py", + "docs/griptape-framework/drivers/src/observability_drivers_2.py", + "docs/griptape-framework/structures/src/observability_1.py", + "docs/griptape-framework/structures/src/observability_2.py", +] -if "DOCS_ALL_CHANGED_FILES" in os.environ and os.environ["DOCS_ALL_CHANGED_FILES"] != "": - docs_all_changed_files = os.environ["DOCS_ALL_CHANGED_FILES"].split() - all_code_blocks = [get_all_code_blocks(changed_file) for changed_file in docs_all_changed_files] - all_code_blocks = [block for sublist in all_code_blocks for block in sublist] -else: - all_code_blocks = get_all_code_blocks("docs/**/*.md") +def discover_python_files(directory): + python_files = [] + for root, _, files in os.walk(directory): + for file in files: + if file.endswith(".py"): + path = os.path.join(root, file) + python_files.append( + pytest.param(path, marks=pytest.mark.skipif(path in SKIP_FILES, reason="Skip file")) + ) + return python_files -@pytest.mark.parametrize("block", all_code_blocks, ids=[f["id"] for f in all_code_blocks]) -def test_code_block(block, monkeypatch): - # Send some stdin for tests that use the Chat util - monkeypatch.setattr("sys.stdin", io.StringIO("Hi\nexit\n")) +@pytest.mark.parametrize("python_file", discover_python_files("docs")) +def test_python_file_execution(python_file): + """Test that the Python file executes successfully.""" + result = subprocess.run( + ["poetry", "run", "python", python_file], + capture_output=True, + text=True, + input="Hi\nexit\n", + ) - check_py_string(block["code"]) + assert result.returncode == 0, f"Execution failed for {python_file} with error: {result.stderr}" diff --git a/tests/integration/tools/test_calculator.py b/tests/integration/tools/test_calculator_tool.py similarity index 73% rename from tests/integration/tools/test_calculator.py rename to tests/integration/tools/test_calculator_tool.py index 2547b947d..c209a9a2c 100644 --- a/tests/integration/tools/test_calculator.py +++ b/tests/integration/tools/test_calculator_tool.py @@ -11,9 +11,9 @@ class TestCalculator: ) def structure_tester(self, request): from griptape.structures import Agent - from griptape.tools import Calculator + from griptape.tools import CalculatorTool - return StructureTester(Agent(tools=[Calculator()], conversation_memory=None, prompt_driver=request.param)) + return StructureTester(Agent(tools=[CalculatorTool()], conversation_memory=None, prompt_driver=request.param)) def test_calculate(self, structure_tester): structure_tester.run("What is 7 times 3 divided by 5 plus 10.") diff --git a/tests/integration/tools/test_file_manager.py b/tests/integration/tools/test_file_manager_tool.py similarity index 79% rename from tests/integration/tools/test_file_manager.py rename to tests/integration/tools/test_file_manager_tool.py index 8a283c6e8..4b5299175 100644 --- 
a/tests/integration/tools/test_file_manager.py +++ b/tests/integration/tools/test_file_manager_tool.py @@ -11,9 +11,9 @@ class TestFileManager: ) def structure_tester(self, request): from griptape.structures import Agent - from griptape.tools import FileManager + from griptape.tools import FileManagerTool - return StructureTester(Agent(tools=[FileManager()], conversation_memory=None, prompt_driver=request.param)) + return StructureTester(Agent(tools=[FileManagerTool()], conversation_memory=None, prompt_driver=request.param)) def test_save_content_to_disk(self, structure_tester): structure_tester.run('Write the content "Hello World!" to a file called "poem.txt".') diff --git a/tests/integration/tools/test_google_docs_client.py b/tests/integration/tools/test_google_docs_tool.py similarity index 95% rename from tests/integration/tools/test_google_docs_client.py rename to tests/integration/tools/test_google_docs_tool.py index 4d70aac17..7c8828dd3 100644 --- a/tests/integration/tools/test_google_docs_client.py +++ b/tests/integration/tools/test_google_docs_tool.py @@ -5,7 +5,7 @@ from tests.utils.structure_tester import StructureTester -class TestGoogleDocsClient: +class TestGoogleDocsTool: @pytest.fixture( autouse=True, params=StructureTester.TOOLKIT_TASK_CAPABLE_PROMPT_DRIVERS, @@ -13,12 +13,12 @@ class TestGoogleDocsClient: ) def structure_tester(self, request): from griptape.structures import Agent - from griptape.tools import GoogleDocsClient + from griptape.tools import GoogleDocsTool return StructureTester( Agent( tools=[ - GoogleDocsClient( + GoogleDocsTool( service_account_credentials={ "type": os.environ["GOOGLE_ACCOUNT_TYPE"], "project_id": os.environ["GOOGLE_PROJECT_ID"], diff --git a/tests/integration/tools/test_google_drive_client.py b/tests/integration/tools/test_google_drive_tool.py similarity index 94% rename from tests/integration/tools/test_google_drive_client.py rename to tests/integration/tools/test_google_drive_tool.py index 23ebb1b32..7fd8b9047 100644 --- a/tests/integration/tools/test_google_drive_client.py +++ b/tests/integration/tools/test_google_drive_tool.py @@ -5,7 +5,7 @@ from tests.utils.structure_tester import StructureTester -class TestGoogleDriveClient: +class TestGoogleDriveTool: @pytest.fixture( autouse=True, params=StructureTester.TOOLKIT_TASK_CAPABLE_PROMPT_DRIVERS, @@ -13,12 +13,12 @@ class TestGoogleDriveClient: ) def structure_tester(self, request): from griptape.structures import Agent - from griptape.tools import GoogleDriveClient + from griptape.tools import GoogleDriveTool return StructureTester( Agent( tools=[ - GoogleDriveClient( + GoogleDriveTool( service_account_credentials={ "type": os.environ["GOOGLE_ACCOUNT_TYPE"], "project_id": os.environ["GOOGLE_PROJECT_ID"], diff --git a/tests/mocks/docker/fake_api.py b/tests/mocks/docker/fake_api.py index 881093057..00e750232 100644 --- a/tests/mocks/docker/fake_api.py +++ b/tests/mocks/docker/fake_api.py @@ -154,7 +154,7 @@ def get_fake_inspect_container(*, tty=False): status_code = 200 response = { "Id": FAKE_CONTAINER_ID, - "Config": {"Labels": {"foo": "bar"}, "Privileged": True, "Tty": tty}, + "config": {"Labels": {"foo": "bar"}, "Privileged": True, "Tty": tty}, "ID": FAKE_CONTAINER_ID, "Image": "busybox:latest", "Name": "foobar", @@ -166,7 +166,7 @@ def get_fake_inspect_container(*, tty=False): "StartedAt": "2013-09-25T14:01:18.869545111+02:00", "Ghost": False, }, - "HostConfig": {"LogConfig": {"Type": "json-file", "Config": {}}}, + "HostConfig": {"LogConfig": {"Type": "json-file", "config": {}}}, 
"MacAddress": "02:42:ac:11:00:0a", } return status_code, response @@ -179,7 +179,7 @@ def get_fake_inspect_image(): "Parent": "27cf784147099545", "Created": "2013-03-23T22:24:18.818426-07:00", "Container": FAKE_CONTAINER_ID, - "Config": {"Labels": {"bar": "foo"}}, + "config": {"Labels": {"bar": "foo"}}, "ContainerConfig": { "Hostname": "", "User": "", @@ -446,7 +446,7 @@ def get_fake_network_list(): "Driver": "bridge", "EnableIPv6": False, "Internal": False, - "IPAM": {"Driver": "default", "Config": [{"Subnet": "172.17.0.0/16"}]}, + "IPAM": {"Driver": "default", "config": [{"Subnet": "172.17.0.0/16"}]}, "Containers": { FAKE_CONTAINER_ID: { "EndpointID": "ed2419a97c1d99", diff --git a/tests/mocks/mock_drivers_config.py b/tests/mocks/mock_drivers_config.py new file mode 100644 index 000000000..aa9683dbd --- /dev/null +++ b/tests/mocks/mock_drivers_config.py @@ -0,0 +1,32 @@ +from attrs import define + +from griptape.configs.drivers import DriversConfig +from griptape.drivers.vector.local_vector_store_driver import LocalVectorStoreDriver +from griptape.utils.decorators import lazy_property +from tests.mocks.mock_embedding_driver import MockEmbeddingDriver +from tests.mocks.mock_image_generation_driver import MockImageGenerationDriver +from tests.mocks.mock_image_query_driver import MockImageQueryDriver +from tests.mocks.mock_prompt_driver import MockPromptDriver + + +@define +class MockDriversConfig(DriversConfig): + @lazy_property() + def prompt_driver(self) -> MockPromptDriver: + return MockPromptDriver() + + @lazy_property() + def image_generation_driver(self) -> MockImageGenerationDriver: + return MockImageGenerationDriver() + + @lazy_property() + def image_query_driver(self) -> MockImageQueryDriver: + return MockImageQueryDriver() + + @lazy_property() + def embedding_driver(self) -> MockEmbeddingDriver: + return MockEmbeddingDriver() + + @lazy_property() + def vector_store_driver(self) -> LocalVectorStoreDriver: + return LocalVectorStoreDriver(embedding_driver=MockEmbeddingDriver()) diff --git a/tests/mocks/mock_embedding_driver.py b/tests/mocks/mock_embedding_driver.py index 46d9bf515..6fe415195 100644 --- a/tests/mocks/mock_embedding_driver.py +++ b/tests/mocks/mock_embedding_driver.py @@ -1,5 +1,7 @@ from __future__ import annotations +from typing import Callable + from attrs import define, field from griptape.drivers import BaseEmbeddingDriver @@ -12,6 +14,7 @@ class MockEmbeddingDriver(BaseEmbeddingDriver): dimensions: int = field(default=42, kw_only=True) max_attempts: int = field(default=1, kw_only=True) tokenizer: MockTokenizer = field(factory=lambda: MockTokenizer(model="foo bar"), kw_only=True) + mock_output: Callable[[str], list[float]] = field(default=lambda chunk: [0, 1], kw_only=True) def try_embed_chunk(self, chunk: str) -> list[float]: - return [0, 1] + return self.mock_output(chunk) diff --git a/tests/mocks/mock_failing_prompt_driver.py b/tests/mocks/mock_failing_prompt_driver.py index 18895fdc9..9c760aab6 100644 --- a/tests/mocks/mock_failing_prompt_driver.py +++ b/tests/mocks/mock_failing_prompt_driver.py @@ -25,12 +25,11 @@ def try_run(self, prompt_stack: PromptStack) -> Message: self.current_attempt += 1 raise Exception("failed attempt") - else: - return Message( - content=[TextMessageContent(TextArtifact("success"))], - role=Message.ASSISTANT_ROLE, - usage=Message.Usage(input_tokens=100, output_tokens=100), - ) + return Message( + content=[TextMessageContent(TextArtifact("success"))], + role=Message.ASSISTANT_ROLE, + usage=Message.Usage(input_tokens=100, 
output_tokens=100), + ) def try_stream(self, prompt_stack: PromptStack) -> Iterator[DeltaMessage]: if self.current_attempt < self.max_failures: diff --git a/tests/mocks/mock_futures_executor.py b/tests/mocks/mock_futures_executor.py new file mode 100644 index 000000000..cbbf84560 --- /dev/null +++ b/tests/mocks/mock_futures_executor.py @@ -0,0 +1,4 @@ +from griptape.mixins import FuturesExecutorMixin + + +class MockFuturesExecutor(FuturesExecutorMixin): ... diff --git a/tests/mocks/mock_image_generation_driver.py b/tests/mocks/mock_image_generation_driver.py index 573eb0fc4..f8d6d89ce 100644 --- a/tests/mocks/mock_image_generation_driver.py +++ b/tests/mocks/mock_image_generation_driver.py @@ -10,6 +10,8 @@ @define class MockImageGenerationDriver(BaseImageGenerationDriver): + model: str = "test-model" + def try_text_to_image(self, prompts: list[str], negative_prompts: Optional[list[str]] = None) -> ImageArtifact: return ImageArtifact(value="mock image", width=512, height=512, format="png") diff --git a/tests/mocks/mock_structure_config.py b/tests/mocks/mock_structure_config.py deleted file mode 100644 index 3f95288f4..000000000 --- a/tests/mocks/mock_structure_config.py +++ /dev/null @@ -1,23 +0,0 @@ -from attrs import Factory, define, field - -from griptape.config import StructureConfig -from tests.mocks.mock_embedding_driver import MockEmbeddingDriver -from tests.mocks.mock_image_generation_driver import MockImageGenerationDriver -from tests.mocks.mock_image_query_driver import MockImageQueryDriver -from tests.mocks.mock_prompt_driver import MockPromptDriver - - -@define -class MockStructureConfig(StructureConfig): - prompt_driver: MockPromptDriver = field( - default=Factory(lambda: MockPromptDriver()), metadata={"serializable": True} - ) - image_generation_driver: MockImageGenerationDriver = field( - default=Factory(lambda: MockImageGenerationDriver(model="dall-e-2")), metadata={"serializable": True} - ) - image_query_driver: MockImageQueryDriver = field( - default=Factory(lambda: MockImageQueryDriver(model="gpt-4-vision-preview")), metadata={"serializable": True} - ) - embedding_driver: MockEmbeddingDriver = field( - default=Factory(lambda: MockEmbeddingDriver(model="text-embedding-3-small")), metadata={"serializable": True} - ) diff --git a/tests/unit/artifacts/test_json_artifact.py b/tests/unit/artifacts/test_json_artifact.py new file mode 100644 index 000000000..06f5d6297 --- /dev/null +++ b/tests/unit/artifacts/test_json_artifact.py @@ -0,0 +1,29 @@ +import json + +import pytest + +from griptape.artifacts import JsonArtifact, TextArtifact + + +class TestJsonArtifact: + def test_value_type_conversion(self): + assert JsonArtifact({"foo": "bar"}).value == json.loads(json.dumps({"foo": "bar"})) + assert JsonArtifact({"foo": 1}).value == json.loads(json.dumps({"foo": 1})) + assert JsonArtifact({"foo": 1.0}).value == json.loads(json.dumps({"foo": 1.0})) + assert JsonArtifact({"foo": True}).value == json.loads(json.dumps({"foo": True})) + assert JsonArtifact({"foo": None}).value == json.loads(json.dumps({"foo": None})) + assert JsonArtifact([{"foo": {"bar": "baz"}}]).value == json.loads(json.dumps([{"foo": {"bar": "baz"}}])) + assert JsonArtifact(None).value == json.loads(json.dumps(None)) + assert JsonArtifact("foo").value == json.loads(json.dumps("foo")) + + def test___add__(self): + with pytest.raises(NotImplementedError): + JsonArtifact({"foo": "bar"}) + TextArtifact("invalid json") + + def test_to_text(self): + assert JsonArtifact({"foo": "bar"}).to_text() == json.dumps({"foo": 
"bar"}) + assert JsonArtifact({"foo": 1}).to_text() == json.dumps({"foo": 1}) + assert JsonArtifact({"foo": 1.0}).to_text() == json.dumps({"foo": 1.0}) + assert JsonArtifact({"foo": True}).to_text() == json.dumps({"foo": True}) + assert JsonArtifact({"foo": None}).to_text() == json.dumps({"foo": None}) + assert JsonArtifact([{"foo": {"bar": "baz"}}]).to_text() == json.dumps([{"foo": {"bar": "baz"}}]) diff --git a/tests/unit/common/test_observable.py b/tests/unit/common/test_observable.py index f48c3086c..c06be5bb1 100644 --- a/tests/unit/common/test_observable.py +++ b/tests/unit/common/test_observable.py @@ -19,6 +19,7 @@ def bar(*args, **kwargs): """Bar's docstring.""" if args: return args[0] + return None assert bar() is None assert bar("a") == "a" @@ -48,6 +49,7 @@ def test_observable_function_empty_parenthesis(self, observe_spy): def bar(*args, **kwargs): if args: return args[0] + return None assert bar() is None assert bar("a") == "a" @@ -73,6 +75,7 @@ def test_observable_function_args(self, observe_spy): def bar(*args, **kwargs): if args: return args[0] + return None assert bar() is None assert bar("a") == "a" diff --git a/tests/unit/common/test_prompt_stack.py b/tests/unit/common/test_prompt_stack.py index 8aba023bc..983dccc4c 100644 --- a/tests/unit/common/test_prompt_stack.py +++ b/tests/unit/common/test_prompt_stack.py @@ -1,6 +1,7 @@ import pytest from griptape.artifacts import ActionArtifact, GenericArtifact, ImageArtifact, ListArtifact, TextArtifact +from griptape.artifacts.error_artifact import ErrorArtifact from griptape.common import ( ActionCallMessageContent, ActionResultMessageContent, @@ -45,6 +46,8 @@ def test_add_message(self, prompt_stack): "role", ) + prompt_stack.add_message(ErrorArtifact("foo"), "role") + assert prompt_stack.messages[0].role == "role" assert isinstance(prompt_stack.messages[0].content[0], TextMessageContent) assert prompt_stack.messages[0].content[0].artifact.value == "foo" @@ -85,6 +88,9 @@ def test_add_message(self, prompt_stack): assert isinstance(prompt_stack.messages[6].content[0], GenericMessageContent) assert prompt_stack.messages[6].content[0].artifact.value == "foo" + assert prompt_stack.messages[7].role == "role" + assert isinstance(prompt_stack.messages[7].content[0], TextMessageContent) + def test_add_system_message(self, prompt_stack): prompt_stack.add_system_message("foo") diff --git a/tests/unit/config/test_structure_config.py b/tests/unit/config/test_structure_config.py deleted file mode 100644 index b9e3477e4..000000000 --- a/tests/unit/config/test_structure_config.py +++ /dev/null @@ -1,97 +0,0 @@ -import pytest - -from griptape.config import StructureConfig -from griptape.structures import Agent - - -class TestStructureConfig: - @pytest.fixture() - def config(self): - return StructureConfig() - - def test_to_dict(self, config): - assert config.to_dict() == { - "type": "StructureConfig", - "prompt_driver": { - "type": "DummyPromptDriver", - "temperature": 0.1, - "max_tokens": None, - "stream": False, - "use_native_tools": False, - }, - "conversation_memory_driver": None, - "embedding_driver": {"type": "DummyEmbeddingDriver"}, - "image_generation_driver": {"type": "DummyImageGenerationDriver"}, - "image_query_driver": {"type": "DummyImageQueryDriver"}, - "vector_store_driver": { - "embedding_driver": {"type": "DummyEmbeddingDriver"}, - "type": "DummyVectorStoreDriver", - }, - "text_to_speech_driver": {"type": "DummyTextToSpeechDriver"}, - "audio_transcription_driver": {"type": "DummyAudioTranscriptionDriver"}, - } - - def 
test_from_dict(self, config): - assert StructureConfig.from_dict(config.to_dict()).to_dict() == config.to_dict() - - def test_unchanged_merge_config(self, config): - assert ( - config.merge_config( - { - "type": "StructureConfig", - "prompt_driver": { - "type": "DummyPromptDriver", - "temperature": 0.1, - "max_tokens": None, - "stream": False, - }, - } - ).to_dict() - == config.to_dict() - ) - - def test_changed_merge_config(self, config): - config = config.merge_config( - {"prompt_driver": {"type": "DummyPromptDriver", "temperature": 0.1, "max_tokens": None, "stream": False}} - ) - - assert config.prompt_driver.temperature == 0.1 - - def test_dot_update(self, config): - config.prompt_driver.max_tokens = 10 - - assert config.prompt_driver.max_tokens == 10 - - def test_drivers(self, config): - assert config.drivers == [ - config.prompt_driver, - config.image_generation_driver, - config.image_query_driver, - config.embedding_driver, - config.vector_store_driver, - config.conversation_memory_driver, - config.text_to_speech_driver, - config.audio_transcription_driver, - ] - - def test_structure(self, config): - structure_1 = Agent( - config=config, - ) - - assert config.structure == structure_1 - assert config._event_listener is not None - for driver in config.drivers: - if driver is not None: - assert config._event_listener in driver.event_listeners - assert len(driver.event_listeners) == 1 - - structure_2 = Agent( - config=config, - ) - assert config.structure == structure_2 - assert config._event_listener is not None - for driver in config.drivers: - if driver is not None: - assert config._event_listener in driver.event_listeners - assert len(driver.event_listeners) == 1 diff --git a/tests/unit/configs/__init__.py b/tests/unit/configs/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unit/configs/drivers/__init__.py b/tests/unit/configs/drivers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unit/config/test_amazon_bedrock_structure_config.py b/tests/unit/configs/drivers/test_amazon_bedrock_drivers_config.py similarity index 89% rename from tests/unit/config/test_amazon_bedrock_structure_config.py rename to tests/unit/configs/drivers/test_amazon_bedrock_drivers_config.py index afe9b3720..129fe281f 100644 --- a/tests/unit/config/test_amazon_bedrock_structure_config.py +++ b/tests/unit/configs/drivers/test_amazon_bedrock_drivers_config.py @@ -1,11 +1,11 @@ import boto3 import pytest -from griptape.config import AmazonBedrockStructureConfig +from griptape.configs.drivers import AmazonBedrockDriversConfig from tests.utils.aws import mock_aws_credentials -class TestAmazonBedrockStructureConfig: +class TestAmazonBedrockDriversConfig: @pytest.fixture(autouse=True) def _run_before_and_after_tests(self): mock_aws_credentials() @@ -13,11 +13,11 @@ def _run_before_and_after_tests(self): @pytest.fixture() def config(self): mock_aws_credentials() - return AmazonBedrockStructureConfig() + return AmazonBedrockDriversConfig() @pytest.fixture() def config_with_values(self): - return AmazonBedrockStructureConfig( + return AmazonBedrockDriversConfig( session=boto3.Session( aws_access_key_id="testing", aws_secret_access_key="testing", region_name="region-value" ) @@ -62,18 +62,17 @@ def test_to_dict(self, config): }, "type": "LocalVectorStoreDriver", }, - "type": "AmazonBedrockStructureConfig", + "type": "AmazonBedrockDriversConfig", "text_to_speech_driver": {"type": "DummyTextToSpeechDriver"}, "audio_transcription_driver": {"type": 
"DummyAudioTranscriptionDriver"}, } def test_from_dict(self, config): - assert AmazonBedrockStructureConfig.from_dict(config.to_dict()).to_dict() == config.to_dict() + assert AmazonBedrockDriversConfig.from_dict(config.to_dict()).to_dict() == config.to_dict() def test_from_dict_with_values(self, config_with_values): assert ( - AmazonBedrockStructureConfig.from_dict(config_with_values.to_dict()).to_dict() - == config_with_values.to_dict() + AmazonBedrockDriversConfig.from_dict(config_with_values.to_dict()).to_dict() == config_with_values.to_dict() ) def test_to_dict_with_values(self, config_with_values): @@ -115,7 +114,7 @@ def test_to_dict_with_values(self, config_with_values): }, "type": "LocalVectorStoreDriver", }, - "type": "AmazonBedrockStructureConfig", + "type": "AmazonBedrockDriversConfig", "text_to_speech_driver": {"type": "DummyTextToSpeechDriver"}, "audio_transcription_driver": {"type": "DummyAudioTranscriptionDriver"}, } diff --git a/tests/unit/config/test_anthropic_structure_config.py b/tests/unit/configs/drivers/test_anthropic_drivers_config.py similarity index 85% rename from tests/unit/config/test_anthropic_structure_config.py rename to tests/unit/configs/drivers/test_anthropic_drivers_config.py index 05519fa5e..b2335d92a 100644 --- a/tests/unit/config/test_anthropic_structure_config.py +++ b/tests/unit/configs/drivers/test_anthropic_drivers_config.py @@ -1,9 +1,9 @@ import pytest -from griptape.config import AnthropicStructureConfig +from griptape.configs.drivers import AnthropicDriversConfig -class TestAnthropicStructureConfig: +class TestAnthropicDriversConfig: @pytest.fixture(autouse=True) def _mock_anthropic(self, mocker): mocker.patch("anthropic.Anthropic") @@ -11,11 +11,11 @@ def _mock_anthropic(self, mocker): @pytest.fixture() def config(self): - return AnthropicStructureConfig() + return AnthropicDriversConfig() def test_to_dict(self, config): assert config.to_dict() == { - "type": "AnthropicStructureConfig", + "type": "AnthropicDriversConfig", "prompt_driver": { "type": "AnthropicPromptDriver", "temperature": 0.1, @@ -51,4 +51,4 @@ def test_to_dict(self, config): } def test_from_dict(self, config): - assert AnthropicStructureConfig.from_dict(config.to_dict()).to_dict() == config.to_dict() + assert AnthropicDriversConfig.from_dict(config.to_dict()).to_dict() == config.to_dict() diff --git a/tests/unit/config/test_azure_openai_structure_config.py b/tests/unit/configs/drivers/test_azure_openai_drivers_config.py similarity index 80% rename from tests/unit/config/test_azure_openai_structure_config.py rename to tests/unit/configs/drivers/test_azure_openai_drivers_config.py index dcdc3a1dc..5c514c947 100644 --- a/tests/unit/config/test_azure_openai_structure_config.py +++ b/tests/unit/configs/drivers/test_azure_openai_drivers_config.py @@ -1,16 +1,16 @@ import pytest -from griptape.config import AzureOpenAiStructureConfig +from griptape.configs.drivers import AzureOpenAiDriversConfig -class TestAzureOpenAiStructureConfig: +class TestAzureOpenAiDriversConfig: @pytest.fixture(autouse=True) def mock_openai(self, mocker): return mocker.patch("openai.AzureOpenAI") @pytest.fixture() def config(self): - return AzureOpenAiStructureConfig( + return AzureOpenAiDriversConfig( azure_endpoint="http://localhost:8080", azure_ad_token="test-token", azure_ad_token_provider=lambda: "test-provider", @@ -18,7 +18,7 @@ def config(self): def test_to_dict(self, config): assert config.to_dict() == { - "type": "AzureOpenAiStructureConfig", + "type": "AzureOpenAiDriversConfig", "azure_endpoint": 
"http://localhost:8080", "prompt_driver": { "type": "AzureOpenAiChatPromptDriver", @@ -85,16 +85,3 @@ def test_to_dict(self, config): "text_to_speech_driver": {"type": "DummyTextToSpeechDriver"}, "audio_transcription_driver": {"type": "DummyAudioTranscriptionDriver"}, } - - def test_from_dict(self, config: AzureOpenAiStructureConfig): - assert AzureOpenAiStructureConfig.from_dict(config.to_dict()).to_dict() == config.to_dict() - - # override values in the dict config - # serialize and deserialize the config - new_config = config.merge_config( - { - "prompt_driver": {"azure_deployment": "new-test-gpt-4"}, - "embedding_driver": {"model": "new-text-embedding-3-small"}, - } - ).to_dict() - assert AzureOpenAiStructureConfig.from_dict(new_config).to_dict() == new_config diff --git a/tests/unit/config/test_cohere_structure_config.py b/tests/unit/configs/drivers/test_cohere_drivers_config.py similarity index 87% rename from tests/unit/config/test_cohere_structure_config.py rename to tests/unit/configs/drivers/test_cohere_drivers_config.py index 113a589ec..3c267d73d 100644 --- a/tests/unit/config/test_cohere_structure_config.py +++ b/tests/unit/configs/drivers/test_cohere_drivers_config.py @@ -1,16 +1,16 @@ import pytest -from griptape.config import CohereStructureConfig +from griptape.configs.drivers import CohereDriversConfig -class TestCohereStructureConfig: +class TestCohereDriversConfig: @pytest.fixture() def config(self): - return CohereStructureConfig(api_key="api_key") + return CohereDriversConfig(api_key="api_key") def test_to_dict(self, config): assert config.to_dict() == { - "type": "CohereStructureConfig", + "type": "CohereDriversConfig", "image_generation_driver": {"type": "DummyImageGenerationDriver"}, "image_query_driver": {"type": "DummyImageQueryDriver"}, "conversation_memory_driver": None, diff --git a/tests/unit/configs/drivers/test_drivers_config.py b/tests/unit/configs/drivers/test_drivers_config.py new file mode 100644 index 000000000..20cc0926c --- /dev/null +++ b/tests/unit/configs/drivers/test_drivers_config.py @@ -0,0 +1,70 @@ +import pytest + +from griptape.configs.drivers import DriversConfig + + +class TestDriversConfig: + @pytest.fixture() + def config(self): + return DriversConfig() + + def test_to_dict(self, config): + assert config.to_dict() == { + "type": "DriversConfig", + "prompt_driver": { + "type": "DummyPromptDriver", + "temperature": 0.1, + "max_tokens": None, + "stream": False, + "use_native_tools": False, + }, + "conversation_memory_driver": None, + "embedding_driver": {"type": "DummyEmbeddingDriver"}, + "image_generation_driver": {"type": "DummyImageGenerationDriver"}, + "image_query_driver": {"type": "DummyImageQueryDriver"}, + "vector_store_driver": { + "embedding_driver": {"type": "DummyEmbeddingDriver"}, + "type": "DummyVectorStoreDriver", + }, + "text_to_speech_driver": {"type": "DummyTextToSpeechDriver"}, + "audio_transcription_driver": {"type": "DummyAudioTranscriptionDriver"}, + } + + def test_from_dict(self, config): + assert DriversConfig.from_dict(config.to_dict()).to_dict() == config.to_dict() + + def test_dot_update(self, config): + config.prompt_driver.max_tokens = 10 + + assert config.prompt_driver.max_tokens == 10 + + @pytest.mark.skip_mock_config() + def test_lazy_init(self): + from griptape.configs import Defaults + + assert Defaults.drivers_config._prompt_driver is None + assert Defaults.drivers_config._image_generation_driver is None + assert Defaults.drivers_config._image_query_driver is None + assert 
Defaults.drivers_config._embedding_driver is None + assert Defaults.drivers_config._vector_store_driver is None + assert Defaults.drivers_config._conversation_memory_driver is None + assert Defaults.drivers_config._text_to_speech_driver is None + assert Defaults.drivers_config._audio_transcription_driver is None + + assert Defaults.drivers_config.prompt_driver is not None + assert Defaults.drivers_config.image_generation_driver is not None + assert Defaults.drivers_config.image_query_driver is not None + assert Defaults.drivers_config.embedding_driver is not None + assert Defaults.drivers_config.vector_store_driver is not None + assert Defaults.drivers_config.conversation_memory_driver is None + assert Defaults.drivers_config.text_to_speech_driver is not None + assert Defaults.drivers_config.audio_transcription_driver is not None + + assert Defaults.drivers_config._prompt_driver is not None + assert Defaults.drivers_config._image_generation_driver is not None + assert Defaults.drivers_config._image_query_driver is not None + assert Defaults.drivers_config._embedding_driver is not None + assert Defaults.drivers_config._vector_store_driver is not None + assert Defaults.drivers_config._conversation_memory_driver is None + assert Defaults.drivers_config._text_to_speech_driver is not None + assert Defaults.drivers_config._audio_transcription_driver is not None diff --git a/tests/unit/config/test_google_structure_config.py b/tests/unit/configs/drivers/test_google_drivers_config.py similarity index 86% rename from tests/unit/config/test_google_structure_config.py rename to tests/unit/configs/drivers/test_google_drivers_config.py index e193cc983..f6df1afef 100644 --- a/tests/unit/config/test_google_structure_config.py +++ b/tests/unit/configs/drivers/test_google_drivers_config.py @@ -1,20 +1,20 @@ import pytest -from griptape.config import GoogleStructureConfig +from griptape.configs.drivers import GoogleDriversConfig -class TestGoogleStructureConfig: +class TestGoogleDriversConfig: @pytest.fixture(autouse=True) def mock_openai(self, mocker): return mocker.patch("google.generativeai.GenerativeModel") @pytest.fixture() def config(self): - return GoogleStructureConfig() + return GoogleDriversConfig() def test_to_dict(self, config): assert config.to_dict() == { - "type": "GoogleStructureConfig", + "type": "GoogleDriversConfig", "prompt_driver": { "type": "GooglePromptDriver", "temperature": 0.1, @@ -49,4 +49,4 @@ def test_to_dict(self, config): } def test_from_dict(self, config): - assert GoogleStructureConfig.from_dict(config.to_dict()).to_dict() == config.to_dict() + assert GoogleDriversConfig.from_dict(config.to_dict()).to_dict() == config.to_dict() diff --git a/tests/unit/config/test_openai_structure_config.py b/tests/unit/configs/drivers/test_openai_driver_config.py similarity index 91% rename from tests/unit/config/test_openai_structure_config.py rename to tests/unit/configs/drivers/test_openai_driver_config.py index 8969e0ad0..2425b178f 100644 --- a/tests/unit/config/test_openai_structure_config.py +++ b/tests/unit/configs/drivers/test_openai_driver_config.py @@ -1,20 +1,20 @@ import pytest -from griptape.config import OpenAiStructureConfig +from griptape.configs.drivers import OpenAiDriversConfig -class TestOpenAiStructureConfig: +class TestOpenAiDriversConfig: @pytest.fixture(autouse=True) def mock_openai(self, mocker): return mocker.patch("openai.OpenAI") @pytest.fixture() def config(self): - return OpenAiStructureConfig() + return OpenAiDriversConfig() def test_to_dict(self, config): 
assert config.to_dict() == { - "type": "OpenAiStructureConfig", + "type": "OpenAiDriversConfig", "prompt_driver": { "type": "OpenAiChatPromptDriver", "base_url": None, @@ -83,4 +83,4 @@ def test_to_dict(self, config): } def test_from_dict(self, config): - assert OpenAiStructureConfig.from_dict(config.to_dict()).to_dict() == config.to_dict() + assert OpenAiDriversConfig.from_dict(config.to_dict()).to_dict() == config.to_dict() diff --git a/tests/unit/configs/logging/__init__.py b/tests/unit/configs/logging/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unit/configs/logging/test_newline_logging_filter.py b/tests/unit/configs/logging/test_newline_logging_filter.py new file mode 100644 index 000000000..05d022dca --- /dev/null +++ b/tests/unit/configs/logging/test_newline_logging_filter.py @@ -0,0 +1,20 @@ +import io +import logging +from contextlib import redirect_stdout + +from griptape.configs import Defaults +from griptape.configs.logging import NewlineLoggingFilter +from griptape.structures import Agent + + +class TestNewlineLoggingFilter: + def test_filter(self): + # use the filter in an Agent + logger = logging.getLogger(Defaults.logging_config.logger_name) + logger.addFilter(NewlineLoggingFilter(replace_str="$$$")) + agent = Agent() + # use a context manager to capture the stdout + with io.StringIO() as buf, redirect_stdout(buf): + agent.run() + output = buf.getvalue() + assert "$$$" in output diff --git a/tests/unit/configs/logging/test_truncate_logging_filter.py b/tests/unit/configs/logging/test_truncate_logging_filter.py new file mode 100644 index 000000000..8aade25f7 --- /dev/null +++ b/tests/unit/configs/logging/test_truncate_logging_filter.py @@ -0,0 +1,20 @@ +import io +import logging +from contextlib import redirect_stdout + +from griptape.configs import Defaults +from griptape.configs.logging import TruncateLoggingFilter +from griptape.structures import Agent + + +class TestTruncateLoggingFilter: + def test_filter(self): + # use the filter in an Agent + logger = logging.getLogger(Defaults.logging_config.logger_name) + logger.addFilter(TruncateLoggingFilter(max_log_length=0)) + agent = Agent() + # use a context manager to capture the stdout + with io.StringIO() as buf, redirect_stdout(buf): + agent.run("test") + output = buf.getvalue() + assert "more characters]" in output diff --git a/tests/unit/configs/test_defaults_config.py b/tests/unit/configs/test_defaults_config.py new file mode 100644 index 000000000..afb679dd0 --- /dev/null +++ b/tests/unit/configs/test_defaults_config.py @@ -0,0 +1,14 @@ +import pytest + + +class TestDefaultsConfig: + def test_init(self): + from griptape.configs.defaults_config import _DefaultsConfig + + assert _DefaultsConfig() is _DefaultsConfig() + + def test_error_init(self): + from griptape.configs import Defaults + + with pytest.raises(TypeError): + Defaults() # pyright: ignore[reportCallIssue] diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py new file mode 100644 index 000000000..a70b6b1a7 --- /dev/null +++ b/tests/unit/conftest.py @@ -0,0 +1,29 @@ +import pytest + +from tests.mocks.mock_drivers_config import MockDriversConfig + + +@pytest.fixture(autouse=True) +def mock_event_bus(): + from griptape.events import EventBus + + EventBus.clear_event_listeners() + + yield EventBus + + EventBus.clear_event_listeners() + + +@pytest.fixture(autouse=True) +def mock_config(request): + from griptape.configs import Defaults + + # Some tests we don't want to use the autouse fixture's MockDriversConfig + if 
"skip_mock_config" in request.keywords: + yield + + return + + Defaults.drivers_config = MockDriversConfig() + + yield Defaults diff --git a/tests/unit/drivers/audio_transcription/test_base_audio_transcription_driver.py b/tests/unit/drivers/audio_transcription/test_base_audio_transcription_driver.py index 519e40f57..29aecfdf9 100644 --- a/tests/unit/drivers/audio_transcription/test_base_audio_transcription_driver.py +++ b/tests/unit/drivers/audio_transcription/test_base_audio_transcription_driver.py @@ -3,7 +3,7 @@ import pytest from griptape.artifacts import AudioArtifact -from griptape.events.event_listener import EventListener +from griptape.events import EventBus, EventListener from tests.mocks.mock_audio_transcription_driver import MockAudioTranscriptionDriver @@ -12,9 +12,9 @@ class TestBaseAudioTranscriptionDriver: def driver(self): return MockAudioTranscriptionDriver() - def test_run_publish_events(self, driver): + def test_run_publish_events(self, driver, mock_config): mock_handler = Mock() - driver.add_event_listener(EventListener(handler=mock_handler)) + EventBus.add_event_listener(EventListener(handler=mock_handler)) driver.run( AudioArtifact( diff --git a/tests/unit/drivers/event_listener/test_base_event_listener_driver.py b/tests/unit/drivers/event_listener/test_base_event_listener_driver.py index 04cfef34b..114778f72 100644 --- a/tests/unit/drivers/event_listener/test_base_event_listener_driver.py +++ b/tests/unit/drivers/event_listener/test_base_event_listener_driver.py @@ -12,9 +12,7 @@ def test_publish_event(self): driver.publish_event(MockEvent().to_dict()) - executor.__enter__.assert_called_once() executor.submit.assert_called_once() - executor.__exit__.assert_called_once() def test__safe_try_publish_event(self): driver = MockEventListenerDriver(batched=False) diff --git a/tests/unit/drivers/event_listener/test_griptape_cloud_event_listener_driver.py b/tests/unit/drivers/event_listener/test_griptape_cloud_event_listener_driver.py index 1cd198756..0bf298870 100644 --- a/tests/unit/drivers/event_listener/test_griptape_cloud_event_listener_driver.py +++ b/tests/unit/drivers/event_listener/test_griptape_cloud_event_listener_driver.py @@ -87,7 +87,3 @@ def try_publish_event_payload_batch(self, mock_post, driver): json=event.to_dict(), headers={"Authorization": "Bearer foo bar"}, ) - - def test_no_structure_run_id(self): - with pytest.raises(ValueError): - GriptapeCloudEventListenerDriver(api_key="foo bar") diff --git a/tests/unit/drivers/image_generation/test_base_image_generation_driver.py b/tests/unit/drivers/image_generation/test_base_image_generation_driver.py index 7447b2c08..96b615a58 100644 --- a/tests/unit/drivers/image_generation/test_base_image_generation_driver.py +++ b/tests/unit/drivers/image_generation/test_base_image_generation_driver.py @@ -3,6 +3,7 @@ import pytest from griptape.artifacts.image_artifact import ImageArtifact +from griptape.events import EventBus from griptape.events.event_listener import EventListener from tests.mocks.mock_image_generation_driver import MockImageGenerationDriver @@ -14,7 +15,7 @@ def driver(self): def test_run_text_to_image_publish_events(self, driver): mock_handler = Mock() - driver.add_event_listener(EventListener(handler=mock_handler)) + EventBus.add_event_listener(EventListener(handler=mock_handler)) driver.run_text_to_image( ["foo", "bar"], @@ -30,7 +31,7 @@ def test_run_text_to_image_publish_events(self, driver): def test_run_image_variation_publish_events(self, driver): mock_handler = Mock() - 
driver.add_event_listener(EventListener(handler=mock_handler)) + EventBus.add_event_listener(EventListener(handler=mock_handler)) driver.run_image_variation( ["foo", "bar"], @@ -52,7 +53,7 @@ def test_run_image_variation_publish_events(self, driver): def test_run_image_image_inpainting_publish_events(self, driver): mock_handler = Mock() - driver.add_event_listener(EventListener(handler=mock_handler)) + EventBus.add_event_listener(EventListener(handler=mock_handler)) driver.run_image_inpainting( ["foo", "bar"], @@ -80,7 +81,7 @@ def test_run_image_image_inpainting_publish_events(self, driver): def test_run_image_image_outpainting_publish_events(self, driver): mock_handler = Mock() - driver.add_event_listener(EventListener(handler=mock_handler)) + EventBus.add_event_listener(EventListener(handler=mock_handler)) driver.run_image_outpainting( ["foo", "bar"], diff --git a/tests/unit/drivers/image_query/test_base_image_query_driver.py b/tests/unit/drivers/image_query/test_base_image_query_driver.py index 14de15f2d..a77fb268e 100644 --- a/tests/unit/drivers/image_query/test_base_image_query_driver.py +++ b/tests/unit/drivers/image_query/test_base_image_query_driver.py @@ -2,7 +2,7 @@ import pytest -from griptape.events.event_listener import EventListener +from griptape.events import EventBus, EventListener from tests.mocks.mock_image_query_driver import MockImageQueryDriver @@ -13,7 +13,7 @@ def driver(self): def test_query_publishes_events(self, driver): mock_handler = Mock() - driver.add_event_listener(EventListener(handler=mock_handler)) + EventBus.add_event_listener(EventListener(handler=mock_handler)) driver.query("foo", []) diff --git a/tests/unit/drivers/memory/conversation/test_dynamodb_conversation_memory_driver.py b/tests/unit/drivers/memory/conversation/test_dynamodb_conversation_memory_driver.py index 8e700d0a5..f1a5df1be 100644 --- a/tests/unit/drivers/memory/conversation/test_dynamodb_conversation_memory_driver.py +++ b/tests/unit/drivers/memory/conversation/test_dynamodb_conversation_memory_driver.py @@ -6,7 +6,6 @@ from griptape.memory.structure import ConversationMemory from griptape.structures import Pipeline from griptape.tasks import PromptTask -from tests.mocks.mock_prompt_driver import MockPromptDriver from tests.utils.aws import mock_aws_credentials @@ -40,7 +39,6 @@ def test_store(self): session = boto3.Session(region_name=self.AWS_REGION) dynamodb = session.resource("dynamodb") table = dynamodb.Table(self.DYNAMODB_TABLE_NAME) - prompt_driver = MockPromptDriver() memory_driver = AmazonDynamoDbConversationMemoryDriver( session=session, table_name=self.DYNAMODB_TABLE_NAME, @@ -49,7 +47,7 @@ def test_store(self): partition_key_value=self.PARTITION_KEY_VALUE, ) memory = ConversationMemory(driver=memory_driver) - pipeline = Pipeline(prompt_driver=prompt_driver, conversation_memory=memory) + pipeline = Pipeline(conversation_memory=memory) pipeline.add_task(PromptTask("test")) @@ -65,7 +63,6 @@ def test_store_with_sort_key(self): session = boto3.Session(region_name=self.AWS_REGION) dynamodb = session.resource("dynamodb") table = dynamodb.Table(self.DYNAMODB_TABLE_NAME) - prompt_driver = MockPromptDriver() memory_driver = AmazonDynamoDbConversationMemoryDriver( session=session, table_name=self.DYNAMODB_TABLE_NAME, @@ -76,7 +73,7 @@ def test_store_with_sort_key(self): sort_key_value="foo", ) memory = ConversationMemory(driver=memory_driver) - pipeline = Pipeline(prompt_driver=prompt_driver, conversation_memory=memory) + pipeline = Pipeline(conversation_memory=memory) 
pipeline.add_task(PromptTask("test")) @@ -89,7 +86,6 @@ def test_store_with_sort_key(self): assert "Item" in response def test_load(self): - prompt_driver = MockPromptDriver() memory_driver = AmazonDynamoDbConversationMemoryDriver( session=boto3.Session(region_name=self.AWS_REGION), table_name=self.DYNAMODB_TABLE_NAME, @@ -98,7 +94,7 @@ def test_load(self): partition_key_value=self.PARTITION_KEY_VALUE, ) memory = ConversationMemory(driver=memory_driver) - pipeline = Pipeline(prompt_driver=prompt_driver, conversation_memory=memory) + pipeline = Pipeline(conversation_memory=memory) pipeline.add_task(PromptTask("test")) @@ -113,7 +109,6 @@ def test_load(self): assert new_memory.runs[0].output.value == "mock output" def test_load_with_sort_key(self): - prompt_driver = MockPromptDriver() memory_driver = AmazonDynamoDbConversationMemoryDriver( session=boto3.Session(region_name=self.AWS_REGION), table_name=self.DYNAMODB_TABLE_NAME, @@ -124,7 +119,7 @@ def test_load_with_sort_key(self): sort_key_value="foo", ) memory = ConversationMemory(driver=memory_driver) - pipeline = Pipeline(prompt_driver=prompt_driver, conversation_memory=memory) + pipeline = Pipeline(conversation_memory=memory) pipeline.add_task(PromptTask("test")) diff --git a/tests/unit/drivers/memory/conversation/test_griptape_cloud_conversation_memory_driver.py b/tests/unit/drivers/memory/conversation/test_griptape_cloud_conversation_memory_driver.py new file mode 100644 index 000000000..707132ef5 --- /dev/null +++ b/tests/unit/drivers/memory/conversation/test_griptape_cloud_conversation_memory_driver.py @@ -0,0 +1,91 @@ +import json + +import pytest + +from griptape.artifacts import BaseArtifact +from griptape.drivers import GriptapeCloudConversationMemoryDriver +from griptape.memory.structure import BaseConversationMemory, ConversationMemory, Run, SummaryConversationMemory + +TEST_CONVERSATION = '{"type": "SummaryConversationMemory", "runs": [{"type": "Run", "id": "729ca6be5d79433d9762eb06dfd677e2", "input": {"type": "TextArtifact", "id": "1234", "value": "Hi There, Hello"}, "output": {"type": "TextArtifact", "id": "123", "value": "Hello! How can I assist you today?"}}], "max_runs": 2}' + + +class TestGriptapeCloudConversationMemoryDriver: + @pytest.fixture(autouse=True) + def _mock_requests(self, mocker): + def get(*args, **kwargs): + if str(args[0]).endswith("/messages"): + return mocker.Mock( + raise_for_status=lambda: None, + json=lambda: { + "messages": [ + { + "message_id": "123", + "input": '{"type": "TextArtifact", "id": "1234", "value": "Hi There, Hello"}', + "output": '{"type": "TextArtifact", "id": "123", "value": "Hello! 
How can I assist you today?"}', + "index": 0, + } + ] + }, + ) + else: + thread_id = args[0].split("/")[-1] + return mocker.Mock( + raise_for_status=lambda: None, + json=lambda: { + "metadata": json.loads(TEST_CONVERSATION), + "name": "test", + "thread_id": "test_metadata", + } + if thread_id == "test_metadata" + else {"name": "test", "thread_id": "test"}, + ) + + mocker.patch( + "requests.get", + side_effect=get, + ) + mocker.patch( + "requests.post", + return_value=mocker.Mock( + raise_for_status=lambda: None, + json=lambda: {"thread_id": "test", "name": "test"}, + ), + ) + mocker.patch( + "requests.patch", + return_value=mocker.Mock( + raise_for_status=lambda: None, + ), + ) + + @pytest.fixture() + def driver(self): + return GriptapeCloudConversationMemoryDriver(api_key="test", thread_id="test") + + def test_no_api_key(self): + with pytest.raises(ValueError): + GriptapeCloudConversationMemoryDriver(api_key=None, thread_id="test") + + def test_no_thread_id(self): + driver = GriptapeCloudConversationMemoryDriver(api_key="test") + assert driver.thread_id == "test" + + def test_store(self, driver): + memory = ConversationMemory( + runs=[ + Run(input=BaseArtifact.from_dict(run["input"]), output=BaseArtifact.from_dict(run["output"])) + for run in json.loads(TEST_CONVERSATION)["runs"] + ], + ) + assert driver.store(memory) is None + + def test_load(self, driver): + memory = driver.load() + assert isinstance(memory, BaseConversationMemory) + assert len(memory.runs) == 1 + + def test_load_metadata(self, driver): + driver.thread_id = "test_metadata" + memory = driver.load() + assert isinstance(memory, SummaryConversationMemory) + assert len(memory.runs) == 1 diff --git a/tests/unit/drivers/memory/conversation/test_local_conversation_memory_driver.py b/tests/unit/drivers/memory/conversation/test_local_conversation_memory_driver.py index e1a383ab9..dff66d0fc 100644 --- a/tests/unit/drivers/memory/conversation/test_local_conversation_memory_driver.py +++ b/tests/unit/drivers/memory/conversation/test_local_conversation_memory_driver.py @@ -7,7 +7,6 @@ from griptape.memory.structure import ConversationMemory from griptape.structures import Pipeline from griptape.tasks import PromptTask -from tests.mocks.mock_prompt_driver import MockPromptDriver class TestLocalConversationMemoryDriver: @@ -22,10 +21,9 @@ def _run_before_and_after_tests(self): self.__delete_file(self.MEMORY_FILE_PATH) def test_store(self): - prompt_driver = MockPromptDriver() memory_driver = LocalConversationMemoryDriver(file_path=self.MEMORY_FILE_PATH) memory = ConversationMemory(driver=memory_driver, autoload=False) - pipeline = Pipeline(prompt_driver=prompt_driver, conversation_memory=memory) + pipeline = Pipeline(conversation_memory=memory) pipeline.add_task(PromptTask("test")) @@ -41,10 +39,9 @@ def test_store(self): assert True def test_load(self): - prompt_driver = MockPromptDriver() memory_driver = LocalConversationMemoryDriver(file_path=self.MEMORY_FILE_PATH) memory = ConversationMemory(driver=memory_driver, autoload=False, max_runs=5) - pipeline = Pipeline(prompt_driver=prompt_driver, conversation_memory=memory) + pipeline = Pipeline(conversation_memory=memory) pipeline.add_task(PromptTask("test")) @@ -60,10 +57,9 @@ def test_load(self): assert new_memory.max_runs == 5 def test_autoload(self): - prompt_driver = MockPromptDriver() memory_driver = LocalConversationMemoryDriver(file_path=self.MEMORY_FILE_PATH) memory = ConversationMemory(driver=memory_driver) - pipeline = Pipeline(prompt_driver=prompt_driver, 
conversation_memory=memory) + pipeline = Pipeline(conversation_memory=memory) pipeline.add_task(PromptTask("test")) diff --git a/tests/unit/drivers/observability/test_open_telemetry_observability_driver.py b/tests/unit/drivers/observability/test_open_telemetry_observability_driver.py index 4f7ce50f0..758505b26 100644 --- a/tests/unit/drivers/observability/test_open_telemetry_observability_driver.py +++ b/tests/unit/drivers/observability/test_open_telemetry_observability_driver.py @@ -8,7 +8,6 @@ from griptape.drivers import OpenTelemetryObservabilityDriver from griptape.observability.observability import Observability from griptape.structures.agent import Agent -from tests.mocks.mock_prompt_driver import MockPromptDriver from tests.utils.expected_spans import ExpectedSpan, ExpectedSpans @@ -170,7 +169,7 @@ def test_observability_agent(self, driver, mock_span_exporter): ) with Observability(observability_driver=driver): - agent = Agent(prompt_driver=MockPromptDriver()) + agent = Agent() agent.run("Hi") assert mock_span_exporter.export.call_count == 1 diff --git a/tests/unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py b/tests/unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py index 6a58b09dc..ebe25bb28 100644 --- a/tests/unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_amazon_bedrock_prompt_driver.py @@ -19,6 +19,7 @@ class TestAmazonBedrockPromptDriver: "properties": { "values": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", @@ -42,6 +43,7 @@ class TestAmazonBedrockPromptDriver: "properties": { "values": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", @@ -65,6 +67,7 @@ class TestAmazonBedrockPromptDriver: "properties": { "values": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", @@ -120,6 +123,7 @@ class TestAmazonBedrockPromptDriver: "properties": { "values": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", @@ -143,6 +147,7 @@ class TestAmazonBedrockPromptDriver: "properties": { "values": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", diff --git a/tests/unit/drivers/prompt/test_anthropic_prompt_driver.py b/tests/unit/drivers/prompt/test_anthropic_prompt_driver.py index 3b1343336..88c1e75ff 100644 --- a/tests/unit/drivers/prompt/test_anthropic_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_anthropic_prompt_driver.py @@ -20,6 +20,7 @@ class TestAnthropicPromptDriver: "properties": { "values": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", @@ -39,6 +40,7 @@ class TestAnthropicPromptDriver: "properties": { "values": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", @@ -58,6 +60,7 @@ class TestAnthropicPromptDriver: "properties": { "values": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", @@ -101,6 +104,7 @@ class TestAnthropicPromptDriver: 
"properties": { "values": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", @@ -120,6 +124,7 @@ class TestAnthropicPromptDriver: "properties": { "values": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", diff --git a/tests/unit/drivers/prompt/test_base_prompt_driver.py b/tests/unit/drivers/prompt/test_base_prompt_driver.py index 2708b0a88..3efe85c98 100644 --- a/tests/unit/drivers/prompt/test_base_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_base_prompt_driver.py @@ -1,7 +1,7 @@ from griptape.artifacts import ErrorArtifact, TextArtifact from griptape.common import Message, PromptStack from griptape.events import FinishPromptEvent, StartPromptEvent -from griptape.mixins import EventPublisherMixin +from griptape.events.event_bus import _EventBus from griptape.structures import Pipeline from griptape.tasks import PromptTask, ToolkitTask from tests.mocks.mock_failing_prompt_driver import MockFailingPromptDriver @@ -10,26 +10,25 @@ class TestBasePromptDriver: - def test_run_via_pipeline_retries_success(self): - driver = MockPromptDriver(max_attempts=1) - pipeline = Pipeline(prompt_driver=driver) + def test_run_via_pipeline_retries_success(self, mock_config): + mock_config.drivers_config.prompt_driver = MockPromptDriver(max_attempts=2) + pipeline = Pipeline() pipeline.add_task(PromptTask("test")) assert isinstance(pipeline.run().output_task.output, TextArtifact) - def test_run_via_pipeline_retries_failure(self): - driver = MockFailingPromptDriver(max_failures=2, max_attempts=1) - pipeline = Pipeline(prompt_driver=driver) + def test_run_via_pipeline_retries_failure(self, mock_config): + mock_config.drivers_config.prompt_driver = MockFailingPromptDriver(max_failures=2, max_attempts=1) + pipeline = Pipeline() pipeline.add_task(PromptTask("test")) assert isinstance(pipeline.run().output_task.output, ErrorArtifact) def test_run_via_pipeline_publishes_events(self, mocker): - mock_publish_event = mocker.patch.object(EventPublisherMixin, "publish_event") - driver = MockPromptDriver() - pipeline = Pipeline(prompt_driver=driver) + mock_publish_event = mocker.patch.object(_EventBus, "publish_event") + pipeline = Pipeline() pipeline.add_task(PromptTask("test")) pipeline.run() @@ -42,14 +41,13 @@ def test_run(self): assert isinstance(MockPromptDriver().run(PromptStack(messages=[])), Message) def test_run_with_stream(self): - pipeline = Pipeline() - result = MockPromptDriver(stream=True, event_listeners=pipeline.event_listeners).run(PromptStack(messages=[])) + result = MockPromptDriver(stream=True).run(PromptStack(messages=[])) assert isinstance(result, Message) assert result.value == "mock output" - def test_run_with_tools(self): - driver = MockPromptDriver(max_attempts=1, use_native_tools=True) - pipeline = Pipeline(prompt_driver=driver) + def test_run_with_tools(self, mock_config): + mock_config.drivers_config.prompt_driver = MockPromptDriver(max_attempts=1, use_native_tools=True) + pipeline = Pipeline() pipeline.add_task(ToolkitTask(tools=[MockTool()])) @@ -57,9 +55,9 @@ def test_run_with_tools(self): assert isinstance(output, TextArtifact) assert output.value == "mock output" - def test_run_with_tools_and_stream(self): - driver = MockPromptDriver(max_attempts=1, stream=True, use_native_tools=True) - pipeline = Pipeline(prompt_driver=driver) + def test_run_with_tools_and_stream(self, mock_config): + 
mock_config.drivers_config.prompt_driver = MockPromptDriver(max_attempts=1, stream=True, use_native_tools=True)
+        pipeline = Pipeline()
 
         pipeline.add_task(ToolkitTask(tools=[MockTool()]))
 
diff --git a/tests/unit/drivers/prompt/test_google_prompt_driver.py b/tests/unit/drivers/prompt/test_google_prompt_driver.py
index 0dc797a74..ce3db921f 100644
--- a/tests/unit/drivers/prompt/test_google_prompt_driver.py
+++ b/tests/unit/drivers/prompt/test_google_prompt_driver.py
@@ -1,4 +1,4 @@
-from unittest.mock import Mock
+from unittest.mock import MagicMock, Mock
 
 import pytest
 from google.generativeai.protos import FunctionCall, FunctionResponse, Part
@@ -46,11 +46,19 @@ class TestGooglePromptDriver:
     @pytest.fixture()
     def mock_generative_model(self, mocker):
         mock_generative_model = mocker.patch("google.generativeai.GenerativeModel")
-        mock_function_call = Mock(type="tool_use", id="MockTool_test", args={"foo": "bar"})
+        mocker.patch("google.protobuf.json_format.MessageToDict").return_value = {
+            "args": {"foo": "bar"},
+        }
+        mock_function_call = MagicMock(
+            type="tool_use", name="bar", id="MockTool_test", _pb=MagicMock(args={"foo": "bar"})
+        )
         mock_function_call.name = "MockTool_test"
         mock_generative_model.return_value.generate_content.return_value = Mock(
-            parts=[Mock(text="model-output", function_call=None), Mock(text=None, function_call=mock_function_call)],
-            usage_metadata=Mock(prompt_token_count=5, candidates_token_count=10),
+            parts=[
+                Mock(text="model-output", function_call=None),
+                MagicMock(name="foo", text=None, function_call=mock_function_call),
+            ],
+            usage_metadata=MagicMock(prompt_token_count=5, candidates_token_count=10),
         )
 
         return mock_generative_model
@@ -58,21 +66,26 @@ def mock_generative_model(self, mocker):
     @pytest.fixture()
     def mock_stream_generative_model(self, mocker):
         mock_generative_model = mocker.patch("google.generativeai.GenerativeModel")
-        mock_function_call_delta = Mock(type="tool_use", id="MockTool_test", args={"foo": "bar"})
+        mocker.patch("google.protobuf.json_format.MessageToDict").return_value = {
+            "args": {"foo": "bar"},
+        }
+        mock_function_call_delta = MagicMock(
+            type="tool_use", name="func call", id="MockTool_test", _pb=MagicMock(args={"foo": "bar"})
+        )
         mock_function_call_delta.name = "MockTool_test"
         mock_generative_model.return_value.generate_content.return_value = iter(
             [
-                Mock(
-                    parts=[Mock(text="model-output")],
-                    usage_metadata=Mock(prompt_token_count=5, candidates_token_count=5),
+                MagicMock(
+                    parts=[MagicMock(text="model-output")],
+                    usage_metadata=MagicMock(prompt_token_count=5, candidates_token_count=5),
                 ),
-                Mock(
-                    parts=[Mock(text=None, function_call=mock_function_call_delta)],
-                    usage_metadata=Mock(prompt_token_count=5, candidates_token_count=5),
+                MagicMock(
+                    parts=[MagicMock(text=None, function_call=mock_function_call_delta)],
+                    usage_metadata=MagicMock(prompt_token_count=5, candidates_token_count=5),
                 ),
-                Mock(
-                    parts=[Mock(text="model-output")],
-                    usage_metadata=Mock(prompt_token_count=5, candidates_token_count=5),
+                MagicMock(
+                    parts=[MagicMock(text="model-output", id="3")],
+                    usage_metadata=MagicMock(prompt_token_count=5, candidates_token_count=5),
                 ),
             ]
         )
diff --git a/tests/unit/drivers/prompt/test_hugging_face_pipeline_prompt_driver.py b/tests/unit/drivers/prompt/test_hugging_face_pipeline_prompt_driver.py
index 5323f5d2d..0ece6c976 100644
--- a/tests/unit/drivers/prompt/test_hugging_face_pipeline_prompt_driver.py
+++ b/tests/unit/drivers/prompt/test_hugging_face_pipeline_prompt_driver.py
@@ -7,8 +7,7 @@ class TestHuggingFacePipelinePromptDriver:
@pytest.fixture(autouse=True) def mock_pipeline(self, mocker): - mock_pipeline = mocker.patch("transformers.pipeline") - return mock_pipeline + return mocker.patch("transformers.pipeline") @pytest.fixture(autouse=True) def mock_generator(self, mock_pipeline): diff --git a/tests/unit/drivers/prompt/test_ollama_prompt_driver.py b/tests/unit/drivers/prompt/test_ollama_prompt_driver.py index 797880fdc..0fc9e0f09 100644 --- a/tests/unit/drivers/prompt/test_ollama_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_ollama_prompt_driver.py @@ -14,6 +14,7 @@ class TestOllamaPromptDriver: "name": "MockTool_test", "parameters": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", @@ -27,6 +28,7 @@ class TestOllamaPromptDriver: "name": "MockTool_test_error", "parameters": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", @@ -40,6 +42,7 @@ class TestOllamaPromptDriver: "name": "MockTool_test_exception", "parameters": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", @@ -67,6 +70,7 @@ class TestOllamaPromptDriver: "name": "MockTool_test_str_output", "parameters": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", @@ -80,6 +84,7 @@ class TestOllamaPromptDriver: "name": "MockTool_test_without_default_memory", "parameters": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", diff --git a/tests/unit/drivers/prompt/test_openai_chat_prompt_driver.py b/tests/unit/drivers/prompt/test_openai_chat_prompt_driver.py index 73073a0f0..ae42aa3a1 100644 --- a/tests/unit/drivers/prompt/test_openai_chat_prompt_driver.py +++ b/tests/unit/drivers/prompt/test_openai_chat_prompt_driver.py @@ -23,6 +23,7 @@ class TestOpenAiChatPromptDriverFixtureMixin: "properties": { "values": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", @@ -45,6 +46,7 @@ class TestOpenAiChatPromptDriverFixtureMixin: "properties": { "values": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", @@ -67,6 +69,7 @@ class TestOpenAiChatPromptDriverFixtureMixin: "properties": { "values": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", @@ -119,6 +122,7 @@ class TestOpenAiChatPromptDriverFixtureMixin: "properties": { "values": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", @@ -141,6 +145,7 @@ class TestOpenAiChatPromptDriverFixtureMixin: "properties": { "values": { "additionalProperties": False, + "description": "Test input", "properties": {"test": {"type": "string"}}, "required": ["test"], "type": "object", diff --git a/tests/unit/drivers/sql/test_snowflake_sql_driver.py b/tests/unit/drivers/sql/test_snowflake_sql_driver.py index a2887cb12..a758bb3a2 100644 --- a/tests/unit/drivers/sql/test_snowflake_sql_driver.py +++ b/tests/unit/drivers/sql/test_snowflake_sql_driver.py @@ -21,8 +21,7 @@ class 
Column: name: str type: str = "VARCHAR" - mock_table = mocker.MagicMock(name="table", columns=[Column("first_name"), Column("last_name")]) - return mock_table + return mocker.MagicMock(name="table", columns=[Column("first_name"), Column("last_name")]) @pytest.fixture() def mock_metadata(self, mocker): @@ -49,27 +48,22 @@ def mock_snowflake_engine(self, mocker): @pytest.fixture() def mock_snowflake_connection(self, mocker): - mock_connection = mocker.MagicMock(spec=SnowflakeConnection, name="connection") - return mock_connection + return mocker.MagicMock(spec=SnowflakeConnection, name="connection") @pytest.fixture() def mock_snowflake_connection_no_schema(self, mocker): - mock_connection = mocker.MagicMock(spec=SnowflakeConnection, name="connection_no_schema", schema=None) - return mock_connection + return mocker.MagicMock(spec=SnowflakeConnection, name="connection_no_schema", schema=None) @pytest.fixture() def mock_snowflake_connection_no_database(self, mocker): - mock_connection = mocker.MagicMock(spec=SnowflakeConnection, name="connection_no_database", database=None) - return mock_connection + return mocker.MagicMock(spec=SnowflakeConnection, name="connection_no_database", database=None) @pytest.fixture() def driver(self, mock_snowflake_engine, mock_snowflake_connection): def get_connection(): return mock_snowflake_connection - new_driver = SnowflakeSqlDriver(connection_func=get_connection, engine=mock_snowflake_engine) - - return new_driver + return SnowflakeSqlDriver(connection_func=get_connection, engine=mock_snowflake_engine) def test_connection_function_wrong_return_type(self): def get_connection() -> Any: diff --git a/tests/unit/drivers/structure_run/test_local_structure_run_driver.py b/tests/unit/drivers/structure_run/test_local_structure_run_driver.py index 316f7bf71..2dd68e24e 100644 --- a/tests/unit/drivers/structure_run/test_local_structure_run_driver.py +++ b/tests/unit/drivers/structure_run/test_local_structure_run_driver.py @@ -9,7 +9,7 @@ class TestLocalStructureRunDriver: def test_run(self): pipeline = Pipeline() - driver = LocalStructureRunDriver(structure_factory_fn=lambda: Agent(prompt_driver=MockPromptDriver())) + driver = LocalStructureRunDriver(structure_factory_fn=lambda: Agent()) task = StructureRunTask(driver=driver) @@ -17,10 +17,11 @@ def test_run(self): assert task.run().to_text() == "mock output" - def test_run_with_env(self): + def test_run_with_env(self, mock_config): pipeline = Pipeline() - agent = Agent(prompt_driver=MockPromptDriver(mock_output=lambda _: os.environ["KEY"])) + mock_config.drivers_config.prompt_driver = MockPromptDriver(mock_output=lambda _: os.environ["KEY"]) + agent = Agent() driver = LocalStructureRunDriver(structure_factory_fn=lambda: agent, env={"KEY": "value"}) task = StructureRunTask(driver=driver) diff --git a/tests/unit/drivers/text_to_speech/test_base_audio_transcription_driver.py b/tests/unit/drivers/text_to_speech/test_base_audio_transcription_driver.py index 8af5dc827..ab448c7c1 100644 --- a/tests/unit/drivers/text_to_speech/test_base_audio_transcription_driver.py +++ b/tests/unit/drivers/text_to_speech/test_base_audio_transcription_driver.py @@ -2,7 +2,7 @@ import pytest -from griptape.events.event_listener import EventListener +from griptape.events import EventBus, EventListener from tests.mocks.mock_text_to_speech_driver import MockTextToSpeechDriver @@ -13,7 +13,7 @@ def driver(self): def test_text_to_audio_publish_events(self, driver): mock_handler = Mock() - driver.add_event_listener(EventListener(handler=mock_handler)) 
+ EventBus.add_event_listener(EventListener(handler=mock_handler)) driver.run_text_to_audio( ["foo", "bar"], diff --git a/tests/unit/drivers/vector/test_astra_db_vector_store_driver.py b/tests/unit/drivers/vector/test_astra_db_vector_store_driver.py new file mode 100644 index 000000000..b544a3494 --- /dev/null +++ b/tests/unit/drivers/vector/test_astra_db_vector_store_driver.py @@ -0,0 +1,136 @@ +from unittest.mock import MagicMock + +import pytest + +from griptape.drivers import AstraDbVectorStoreDriver, BaseVectorStoreDriver +from tests.mocks.mock_embedding_driver import MockEmbeddingDriver + + +class TestAstraDbVectorStoreDriver: + @pytest.fixture(autouse=True) + def base_mock_collection(self, mocker): + return mocker.patch("astrapy.DataAPIClient").return_value.get_database.return_value.get_collection + + @pytest.fixture() + def mock_collection(self, base_mock_collection, one_document): + """Augmented with specific response to certain method calls.""" + # insert_one with server-side provided ID + mock_insert_one_return_value = MagicMock() + mock_insert_one_return_value.inserted_id = "insert_one_server_side_id" + base_mock_collection.return_value.insert_one.return_value = mock_insert_one_return_value + # find_one + base_mock_collection.return_value.find_one.return_value = one_document + # find + base_mock_collection.return_value.find.return_value = [one_document] + # + return base_mock_collection + + @pytest.fixture() + def mock_collection_findnothing(self, base_mock_collection): + """`find` and `find_one` return nothing.""" + base_mock_collection.return_value.find_one.return_value = None + base_mock_collection.return_value.find.return_value = [] + return base_mock_collection + + @pytest.fixture() + def driver(self, mock_collection): + return AstraDbVectorStoreDriver( + api_endpoint="ep", + token="to", + collection_name="co", + astra_db_namespace="ns", + embedding_driver=MockEmbeddingDriver(dimensions=3), + ) + + @pytest.fixture() + def one_document( + self, + ): + return { + "_id": "doc_id", + "$vector": [3.0, 2.0, 1.0], + "meta": "doc_meta", + "namespace": "doc_namespace", + "$similarity": 10, + } + + @pytest.fixture() + def one_entry(self, one_document): + return BaseVectorStoreDriver.Entry( + id=one_document["_id"], + vector=one_document["$vector"], + meta=one_document["meta"], + namespace=one_document["namespace"], + ) + + @pytest.fixture() + def one_query_entry(self, one_document): + return BaseVectorStoreDriver.Entry( + id=one_document["_id"], + vector=one_document["$vector"], + meta=one_document["meta"], + namespace=one_document["namespace"], + score=one_document["$similarity"], + ) + + def test_delete_vector(self, driver, mock_collection): + driver.delete_vector("deletee_id") + mock_collection.return_value.delete_one.assert_called_once() + + def test_upsert_vector_with_id(self, driver, mock_collection): + upserted_id = driver.upsert_vector([1.0, 2.0, 3.0], vector_id="some_vector_id", namespace="some_namespace") + assert upserted_id == "some_vector_id" + mock_collection.return_value.find_one_and_replace.assert_called_once() + + def test_upsert_vector_no_id(self, driver, mock_collection): + upserted_id = driver.upsert_vector([1.0, 2.0, 3.0], namespace="some_namespace") + assert upserted_id == "insert_one_server_side_id" + mock_collection.return_value.insert_one.assert_called_once() + + def test_load_entry(self, driver, mock_collection, one_entry): + entry = driver.load_entry("vector_id", namespace="some_namespace") + assert entry == one_entry + 
mock_collection.return_value.find_one.assert_called_once_with( + filter={"_id": "vector_id", "namespace": "some_namespace"}, + projection={"*": 1}, + ) + + def test_load_entry_empty(self, driver, mock_collection_findnothing): + entry = driver.load_entry("vector_id", namespace="some_namespace") + assert entry is None + mock_collection_findnothing.return_value.find_one.assert_called_once_with( + filter={"_id": "vector_id", "namespace": "some_namespace"}, + projection={"*": 1}, + ) + + def test_load_entries(self, driver, mock_collection, one_entry): + entries = driver.load_entries(namespace="some_namespace") + assert entries == [one_entry] + mock_collection.return_value.find.assert_called_once_with( + filter={"namespace": "some_namespace"}, + projection={"*": 1}, + ) + + def test_query_allparams(self, driver, mock_collection, one_query_entry): + entries1 = driver.query("some query", count=999, namespace="some_namespace", include_vectors=True) + assert entries1 == [one_query_entry] + query_vector = driver.embedding_driver.embed_string("some query") + mock_collection.return_value.find.assert_called_once_with( + filter={"namespace": "some_namespace"}, + sort={"$vector": query_vector}, + limit=999, + projection={"*": 1}, + include_similarity=True, + ) + + def test_query_minparams(self, driver, mock_collection, one_query_entry): + entries0 = driver.query("some query") + assert entries0 == [one_query_entry] + query_vector = driver.embedding_driver.embed_string("some query") + mock_collection.return_value.find.assert_called_once_with( + filter={}, + sort={"$vector": query_vector}, + limit=BaseVectorStoreDriver.DEFAULT_QUERY_COUNT, + projection=None, + include_similarity=True, + ) diff --git a/tests/unit/drivers/vector/test_local_vector_store_driver.py b/tests/unit/drivers/vector/test_local_vector_store_driver.py index 6f022793c..2504b2486 100644 --- a/tests/unit/drivers/vector/test_local_vector_store_driver.py +++ b/tests/unit/drivers/vector/test_local_vector_store_driver.py @@ -22,3 +22,16 @@ def test_upsert_text_artifacts_list(self, driver): assert len(driver.load_artifacts(namespace="foo")) == 0 assert len(driver.load_artifacts()) == 2 + + def test_upsert_text_artifacts_stress_test(self, driver): + driver.upsert_text_artifacts( + { + "test1": [TextArtifact(f"foo-{i}") for i in range(0, 1000)], + "test2": [TextArtifact(f"foo-{i}") for i in range(0, 1000)], + "test3": [TextArtifact(f"foo-{i}") for i in range(0, 1000)], + } + ) + + assert len(driver.query("foo", namespace="test1")) == 1000 + assert len(driver.query("foo", namespace="test2")) == 1000 + assert len(driver.query("foo", namespace="test3")) == 1000 diff --git a/tests/unit/drivers/vector/test_qdrant_vector_store_driver.py b/tests/unit/drivers/vector/test_qdrant_vector_store_driver.py index 0b22784eb..ffb359953 100644 --- a/tests/unit/drivers/vector/test_qdrant_vector_store_driver.py +++ b/tests/unit/drivers/vector/test_qdrant_vector_store_driver.py @@ -20,7 +20,7 @@ def mock_engine(self): @pytest.fixture(autouse=True) def driver(self, embedding_driver, mocker): mocker.patch("qdrant_client.QdrantClient") - driver = QdrantVectorStoreDriver( + return QdrantVectorStoreDriver( url="http://some_url", port=8080, grpc_port=50051, @@ -36,7 +36,6 @@ def driver(self, embedding_driver, mocker): content_payload_key="data", embedding_driver=embedding_driver, ) - return driver def test_attrs_post_init(self, driver): with patch("griptape.drivers.vector.qdrant_vector_store_driver.import_optional_dependency") as mock_import: diff --git 
a/tests/unit/drivers/web_search/test_duck_duck_go_web_search_driver.py b/tests/unit/drivers/web_search/test_duck_duck_go_web_search_driver.py index 3d0a782eb..be79798ba 100644 --- a/tests/unit/drivers/web_search/test_duck_duck_go_web_search_driver.py +++ b/tests/unit/drivers/web_search/test_duck_duck_go_web_search_driver.py @@ -13,14 +13,21 @@ def driver(self, mocker): {"title": "foo", "href": "bar", "body": "baz"}, {"title": "foo2", "href": "bar2", "body": "baz2"}, ] - - mocker.patch("duckduckgo_search.DDGS.text", return_value=mock_response) - + mock_ddg = mocker.Mock( + text=lambda *args, **kwargs: mock_response, + ) + mocker.patch("duckduckgo_search.DDGS", return_value=mock_ddg) return DuckDuckGoWebSearchDriver() @pytest.fixture() def driver_with_error(self, mocker): - mocker.patch("duckduckgo_search.DDGS.text", side_effect=Exception("test_error")) + def error(*args, **kwargs): + raise Exception("test_error") + + mock_ddg = mocker.Mock( + text=error, + ) + mocker.patch("duckduckgo_search.DDGS", return_value=mock_ddg) return DuckDuckGoWebSearchDriver() diff --git a/tests/unit/engines/extraction/test_csv_extraction_engine.py b/tests/unit/engines/extraction/test_csv_extraction_engine.py index f69d8a0ba..893c21d60 100644 --- a/tests/unit/engines/extraction/test_csv_extraction_engine.py +++ b/tests/unit/engines/extraction/test_csv_extraction_engine.py @@ -1,16 +1,15 @@ import pytest from griptape.engines import CsvExtractionEngine -from tests.mocks.mock_prompt_driver import MockPromptDriver class TestCsvExtractionEngine: @pytest.fixture() def engine(self): - return CsvExtractionEngine(prompt_driver=MockPromptDriver()) + return CsvExtractionEngine(column_names=["test1"]) def test_extract(self, engine): - result = engine.extract("foo", column_names=["test1"]) + result = engine.extract("foo") assert len(result.value) == 1 assert result.value[0].value == {"test1": "mock output"} diff --git a/tests/unit/engines/extraction/test_json_extraction_engine.py b/tests/unit/engines/extraction/test_json_extraction_engine.py index d95adbb43..48430f1e5 100644 --- a/tests/unit/engines/extraction/test_json_extraction_engine.py +++ b/tests/unit/engines/extraction/test_json_extraction_engine.py @@ -12,22 +12,26 @@ def engine(self): return JsonExtractionEngine( prompt_driver=MockPromptDriver( mock_output='[{"test_key_1": "test_value_1"}, {"test_key_2": "test_value_2"}]' - ) + ), + template_schema=Schema({"foo": "bar"}).json_schema("TemplateSchema"), ) def test_extract(self, engine): - json_schema = Schema({"foo": "bar"}).json_schema("TemplateSchema") - result = engine.extract("foo", template_schema=json_schema) + result = engine.extract("foo") assert len(result.value) == 2 assert result.value[0].value == '{"test_key_1": "test_value_1"}' assert result.value[1].value == '{"test_key_2": "test_value_2"}' def test_extract_error(self, engine): - assert isinstance(engine.extract("foo", template_schema=lambda: "non serializable"), ErrorArtifact) + engine.template_schema = lambda: "non serializable" + assert isinstance(engine.extract("foo"), ErrorArtifact) def test_json_to_text_artifacts(self, engine): assert [ a.value for a in engine.json_to_text_artifacts('[{"test_key_1": "test_value_1"}, {"test_key_2": "test_value_2"}]') ] == ['{"test_key_1": "test_value_1"}', '{"test_key_2": "test_value_2"}'] + + def test_json_to_text_artifacts_no_matches(self, engine): + assert engine.json_to_text_artifacts("asdfasdfasdf") == [] diff --git a/tests/unit/engines/query/__init__.py b/tests/unit/engines/query/__init__.py new file mode 
100644 index 000000000..e69de29bb diff --git a/tests/unit/engines/query/test_translate_query_rag_module.py b/tests/unit/engines/query/test_translate_query_rag_module.py new file mode 100644 index 000000000..a04a5b619 --- /dev/null +++ b/tests/unit/engines/query/test_translate_query_rag_module.py @@ -0,0 +1,10 @@ +from griptape.engines.rag import RagContext +from griptape.engines.rag.modules import TranslateQueryRagModule +from tests.mocks.mock_prompt_driver import MockPromptDriver + + +class TestTranslateQueryRagModule: + def test_run(self): + module = TranslateQueryRagModule(prompt_driver=MockPromptDriver(), language="english") + + assert module.run(RagContext(query="foo")).query == "mock output" diff --git a/tests/unit/engines/rag/modules/generation/test_footnote_prompt_response_rag_module.py b/tests/unit/engines/rag/modules/generation/test_footnote_prompt_response_rag_module.py index 385cf0c04..430f67ef9 100644 --- a/tests/unit/engines/rag/modules/generation/test_footnote_prompt_response_rag_module.py +++ b/tests/unit/engines/rag/modules/generation/test_footnote_prompt_response_rag_module.py @@ -4,16 +4,15 @@ from griptape.common import Reference from griptape.engines.rag import RagContext from griptape.engines.rag.modules import FootnotePromptResponseRagModule -from tests.mocks.mock_prompt_driver import MockPromptDriver class TestFootnotePromptResponseRagModule: @pytest.fixture() def module(self): - return FootnotePromptResponseRagModule(prompt_driver=MockPromptDriver()) + return FootnotePromptResponseRagModule() def test_run(self, module): - assert module.run(RagContext(query="test")).output.value == "mock output" + assert module.run(RagContext(query="test")).value == "mock output" def test_prompt(self, module): system_message = module.default_system_template_generator( diff --git a/tests/unit/engines/rag/modules/generation/test_metadata_before_response_rag_module.py b/tests/unit/engines/rag/modules/generation/test_metadata_before_response_rag_module.py deleted file mode 100644 index 9519c8017..000000000 --- a/tests/unit/engines/rag/modules/generation/test_metadata_before_response_rag_module.py +++ /dev/null @@ -1,21 +0,0 @@ -from griptape.engines.rag import RagContext -from griptape.engines.rag.modules import MetadataBeforeResponseRagModule - - -class TestMetadataBeforeResponseRagModule: - def test_run(self): - module = MetadataBeforeResponseRagModule(name="foo") - - assert ( - "foo" in module.run(RagContext(module_configs={"foo": {"metadata": "foo"}}, query="test")).before_query[0] - ) - - def test_run_with_override(self): - module = MetadataBeforeResponseRagModule(name="foo", metadata="bar") - - assert ( - "bar" - in module.run( - RagContext(module_configs={"foo": {"query_params": {"metadata": "foo"}}}, query="test") - ).before_query[0] - ) diff --git a/tests/unit/engines/rag/modules/generation/test_prompt_response_rag_module.py b/tests/unit/engines/rag/modules/generation/test_prompt_response_rag_module.py index 2f8a912e2..cc8d35f0e 100644 --- a/tests/unit/engines/rag/modules/generation/test_prompt_response_rag_module.py +++ b/tests/unit/engines/rag/modules/generation/test_prompt_response_rag_module.py @@ -3,20 +3,25 @@ from griptape.artifacts import TextArtifact from griptape.engines.rag import RagContext from griptape.engines.rag.modules import PromptResponseRagModule +from griptape.rules import Rule, Ruleset from tests.mocks.mock_prompt_driver import MockPromptDriver class TestPromptResponseRagModule: @pytest.fixture() def module(self): - return 
PromptResponseRagModule(prompt_driver=MockPromptDriver()) + return PromptResponseRagModule( + prompt_driver=MockPromptDriver(), + rulesets=[Ruleset(name="test", rules=[Rule("*RULESET*")])], + metadata="*META*", + ) def test_run(self, module): - assert module.run(RagContext(query="test")).output.value == "mock output" + assert module.run(RagContext(query="test")).value == "mock output" def test_prompt(self, module): system_message = module.default_system_template_generator( - RagContext(query="test", before_query=["*RULESET*", "*META*"], after_query=[]), + RagContext(query="test"), artifacts=[TextArtifact("*TEXT SEGMENT 1*"), TextArtifact("*TEXT SEGMENT 2*")], ) diff --git a/tests/unit/engines/rag/modules/generation/test_rulesets_before_response_rag_module.py b/tests/unit/engines/rag/modules/generation/test_rulesets_before_response_rag_module.py deleted file mode 100644 index bc85cf266..000000000 --- a/tests/unit/engines/rag/modules/generation/test_rulesets_before_response_rag_module.py +++ /dev/null @@ -1,10 +0,0 @@ -from griptape.engines.rag import RagContext -from griptape.engines.rag.modules import RulesetsBeforeResponseRagModule -from griptape.rules import Rule, Ruleset - - -class TestRulesetsBeforeResponseRagModule: - def test_run(self): - module = RulesetsBeforeResponseRagModule(rulesets=[Ruleset(name="test ruleset", rules=[Rule("test rule")])]) - - assert "test rule" in module.run(RagContext(query="test")).before_query[0] diff --git a/tests/unit/engines/rag/modules/generation/test_text_chunks_response_rag_module.py b/tests/unit/engines/rag/modules/generation/test_text_chunks_response_rag_module.py index ae4410b2c..05b8042e6 100644 --- a/tests/unit/engines/rag/modules/generation/test_text_chunks_response_rag_module.py +++ b/tests/unit/engines/rag/modules/generation/test_text_chunks_response_rag_module.py @@ -13,4 +13,4 @@ def module(self): def test_run(self, module): text_chunks = [TextArtifact("foo"), TextArtifact("bar")] - assert module.run(RagContext(query="test", text_chunks=text_chunks)).output.value == text_chunks + assert module.run(RagContext(query="test", text_chunks=text_chunks)).value == text_chunks diff --git a/tests/unit/engines/rag/modules/retrieval/test_vector_store_retrieval_rag_module.py b/tests/unit/engines/rag/modules/retrieval/test_vector_store_retrieval_rag_module.py index 9fecc3c0e..96a91280e 100644 --- a/tests/unit/engines/rag/modules/retrieval/test_vector_store_retrieval_rag_module.py +++ b/tests/unit/engines/rag/modules/retrieval/test_vector_store_retrieval_rag_module.py @@ -40,22 +40,18 @@ def test_run_with_namespace(self): def test_run_with_namespace_overrides(self): vector_store_driver = LocalVectorStoreDriver(embedding_driver=MockEmbeddingDriver()) module = VectorStoreRetrievalRagModule( - vector_store_driver=vector_store_driver, query_params={"namespace": "test"} + name="TestModule", vector_store_driver=vector_store_driver, query_params={"namespace": "test"} ) vector_store_driver.upsert_text_artifact(TextArtifact("foobar1"), namespace="test") vector_store_driver.upsert_text_artifact(TextArtifact("foobar2"), namespace="test") result1 = module.run( - RagContext( - query="test", module_configs={"VectorStoreRetrievalRagModule": {"query_params": {"namespace": "empty"}}} - ) + RagContext(query="test", module_configs={"TestModule": {"query_params": {"namespace": "empty"}}}) ) result2 = module.run( - RagContext( - query="test", module_configs={"VectorStoreRetrievalRagModule": {"query_params": {"namespace": "test"}}} - ) + RagContext(query="test", 
module_configs={"TestModule": {"query_params": {"namespace": "test"}}}) ) assert len(result1) == 0 diff --git a/tests/unit/engines/rag/modules/test_base_rag_nodule.py b/tests/unit/engines/rag/modules/test_base_rag_nodule.py index be1fda861..b5db2502d 100644 --- a/tests/unit/engines/rag/modules/test_base_rag_nodule.py +++ b/tests/unit/engines/rag/modules/test_base_rag_nodule.py @@ -3,13 +3,19 @@ class TestBaseRagModule: - def test_generate_query_prompt_stack(self): - prompt_stack = MockRagModule().generate_query_prompt_stack("test system", "test query") + def test_generate_prompt_stack(self): + prompt_stack = MockRagModule().generate_prompt_stack("test system", "test query") assert len(prompt_stack.messages) == 2 assert prompt_stack.messages[0].is_system() assert prompt_stack.messages[1].is_user() + def test_generate_prompt_stack_with_empty_system_message(self): + prompt_stack = MockRagModule().generate_prompt_stack(None, "test query") + + assert len(prompt_stack.messages) == 1 + assert prompt_stack.messages[0].is_user() + def test_get_context_param(self): module = MockRagModule(name="boo") context = RagContext(query="test") diff --git a/tests/unit/engines/rag/test_rag_engine.py b/tests/unit/engines/rag/test_rag_engine.py index c3d728bb3..71db4e01f 100644 --- a/tests/unit/engines/rag/test_rag_engine.py +++ b/tests/unit/engines/rag/test_rag_engine.py @@ -19,18 +19,18 @@ def engine(self): ) ] ), - response_stage=ResponseRagStage(response_module=PromptResponseRagModule(prompt_driver=MockPromptDriver())), + response_stage=ResponseRagStage( + response_modules=[PromptResponseRagModule(prompt_driver=MockPromptDriver())] + ), ) def test_module_name_uniqueness(self): - vector_store_driver = LocalVectorStoreDriver(embedding_driver=MockEmbeddingDriver()) - with pytest.raises(ValueError): RagEngine( retrieval_stage=RetrievalRagStage( retrieval_modules=[ - VectorStoreRetrievalRagModule(name="test", vector_store_driver=vector_store_driver), - VectorStoreRetrievalRagModule(name="test", vector_store_driver=vector_store_driver), + VectorStoreRetrievalRagModule(name="test"), + VectorStoreRetrievalRagModule(name="test"), ] ) ) @@ -38,14 +38,14 @@ def test_module_name_uniqueness(self): assert RagEngine( retrieval_stage=RetrievalRagStage( retrieval_modules=[ - VectorStoreRetrievalRagModule(name="test1", vector_store_driver=vector_store_driver), - VectorStoreRetrievalRagModule(name="test2", vector_store_driver=vector_store_driver), + VectorStoreRetrievalRagModule(name="test1"), + VectorStoreRetrievalRagModule(name="test2"), ] ) ) def test_process_query(self, engine): - assert engine.process_query("test").output.value == "mock output" + assert engine.process_query("test").outputs[0].value == "mock output" def test_process(self, engine): - assert engine.process(RagContext(query="test")).output.value == "mock output" + assert engine.process(RagContext(query="test")).outputs[0].value == "mock output" diff --git a/tests/unit/engines/summary/test_prompt_summary_engine.py b/tests/unit/engines/summary/test_prompt_summary_engine.py index 4d9c65e03..138444ae3 100644 --- a/tests/unit/engines/summary/test_prompt_summary_engine.py +++ b/tests/unit/engines/summary/test_prompt_summary_engine.py @@ -12,7 +12,7 @@ class TestPromptSummaryEngine: @pytest.fixture() def engine(self): - return PromptSummaryEngine(prompt_driver=MockPromptDriver()) + return PromptSummaryEngine() def test_summarize_text(self, engine): assert engine.summarize_text("foobar") == "mock output" @@ -24,10 +24,10 @@ def test_summarize_artifacts(self, engine): 
def test_max_token_multiplier_invalid(self, engine): with pytest.raises(ValueError): - PromptSummaryEngine(prompt_driver=MockPromptDriver(), max_token_multiplier=0) + PromptSummaryEngine(max_token_multiplier=0) with pytest.raises(ValueError): - PromptSummaryEngine(prompt_driver=MockPromptDriver(), max_token_multiplier=10000) + PromptSummaryEngine(max_token_multiplier=10000) def test_chunked_summary(self, engine): def smaller_input(prompt_stack: PromptStack): diff --git a/tests/unit/events/test_event_bus.py b/tests/unit/events/test_event_bus.py new file mode 100644 index 000000000..cc432dafb --- /dev/null +++ b/tests/unit/events/test_event_bus.py @@ -0,0 +1,50 @@ +from unittest.mock import Mock + +from griptape.events import EventBus, EventListener +from tests.mocks.mock_event import MockEvent + + +class TestEventBus: + def test_init(self): + from griptape.events.event_bus import _EventBus + + assert _EventBus() is _EventBus() + + def test_add_event_listeners(self): + EventBus.add_event_listeners([EventListener(), EventListener()]) + assert len(EventBus.event_listeners) == 2 + + def test_remove_event_listeners(self): + listeners = [EventListener(), EventListener()] + EventBus.add_event_listeners(listeners) + EventBus.remove_event_listeners(listeners) + assert len(EventBus.event_listeners) == 0 + + def test_add_event_listener(self): + EventBus.add_event_listener(EventListener()) + EventBus.add_event_listener(EventListener()) + + assert len(EventBus.event_listeners) == 2 + + def test_remove_event_listener(self): + listener = EventListener() + EventBus.add_event_listener(listener) + EventBus.remove_event_listener(listener) + + assert len(EventBus.event_listeners) == 0 + + def test_remove_unknown_event_listener(self): + EventBus.remove_event_listener(EventListener()) + + def test_publish_event(self): + # Given + mock_handler = Mock() + mock_handler.return_value = None + EventBus.add_event_listeners([EventListener(handler=mock_handler)]) + mock_event = MockEvent() + + # When + EventBus.publish_event(mock_event) + + # Then + mock_handler.assert_called_once_with(mock_event) diff --git a/tests/unit/events/test_event_listener.py b/tests/unit/events/test_event_listener.py index b245c2be9..a6d90d4fc 100644 --- a/tests/unit/events/test_event_listener.py +++ b/tests/unit/events/test_event_listener.py @@ -4,6 +4,7 @@ from griptape.events import ( CompletionChunkEvent, + EventBus, EventListener, FinishActionsSubtaskEvent, FinishPromptEvent, @@ -24,20 +25,21 @@ class TestEventListener: @pytest.fixture() - def pipeline(self): + def pipeline(self, mock_config): + mock_config.drivers_config.prompt_driver = MockPromptDriver(stream=True) task = ToolkitTask("test", tools=[MockTool(name="Tool1")]) - pipeline = Pipeline(prompt_driver=MockPromptDriver(stream=True)) + pipeline = Pipeline() pipeline.add_task(task) task.add_subtask(ActionsSubtask("foo")) return pipeline - def test_untyped_listeners(self, pipeline): + def test_untyped_listeners(self, pipeline, mock_config): event_handler_1 = Mock() event_handler_2 = Mock() - pipeline.event_listeners = [EventListener(handler=event_handler_1), EventListener(handler=event_handler_2)] + EventBus.add_event_listeners([EventListener(handler=event_handler_1), EventListener(handler=event_handler_2)]) # can't mock subtask events, so must manually call pipeline.tasks[0].subtasks[0].before_run() @@ -47,7 +49,7 @@ def test_untyped_listeners(self, pipeline): assert event_handler_1.call_count == 9 assert event_handler_2.call_count == 9 - def test_typed_listeners(self, pipeline): + 
def test_typed_listeners(self, pipeline, mock_config): start_prompt_event_handler = Mock() finish_prompt_event_handler = Mock() start_task_event_handler = Mock() @@ -58,17 +60,19 @@ def test_typed_listeners(self, pipeline): finish_structure_run_event_handler = Mock() completion_chunk_handler = Mock() - pipeline.event_listeners = [ - EventListener(start_prompt_event_handler, event_types=[StartPromptEvent]), - EventListener(finish_prompt_event_handler, event_types=[FinishPromptEvent]), - EventListener(start_task_event_handler, event_types=[StartTaskEvent]), - EventListener(finish_task_event_handler, event_types=[FinishTaskEvent]), - EventListener(start_subtask_event_handler, event_types=[StartActionsSubtaskEvent]), - EventListener(finish_subtask_event_handler, event_types=[FinishActionsSubtaskEvent]), - EventListener(start_structure_run_event_handler, event_types=[StartStructureRunEvent]), - EventListener(finish_structure_run_event_handler, event_types=[FinishStructureRunEvent]), - EventListener(completion_chunk_handler, event_types=[CompletionChunkEvent]), - ] + EventBus.add_event_listeners( + [ + EventListener(start_prompt_event_handler, event_types=[StartPromptEvent]), + EventListener(finish_prompt_event_handler, event_types=[FinishPromptEvent]), + EventListener(start_task_event_handler, event_types=[StartTaskEvent]), + EventListener(finish_task_event_handler, event_types=[FinishTaskEvent]), + EventListener(start_subtask_event_handler, event_types=[StartActionsSubtaskEvent]), + EventListener(finish_subtask_event_handler, event_types=[FinishActionsSubtaskEvent]), + EventListener(start_structure_run_event_handler, event_types=[StartStructureRunEvent]), + EventListener(finish_structure_run_event_handler, event_types=[FinishStructureRunEvent]), + EventListener(completion_chunk_handler, event_types=[CompletionChunkEvent]), + ] + ) # can't mock subtask events, so must manually call pipeline.tasks[0].subtasks[0].before_run() @@ -86,25 +90,25 @@ def test_typed_listeners(self, pipeline): completion_chunk_handler.assert_called_once() def test_add_remove_event_listener(self, pipeline): - pipeline.event_listeners = [] + EventBus.clear_event_listeners() mock1 = Mock() mock2 = Mock() # duplicate event listeners will only get added once - event_listener_1 = pipeline.add_event_listener(EventListener(mock1, event_types=[StartPromptEvent])) - pipeline.add_event_listener(EventListener(mock1, event_types=[StartPromptEvent])) + event_listener_1 = EventBus.add_event_listener(EventListener(mock1, event_types=[StartPromptEvent])) + EventBus.add_event_listener(EventListener(mock1, event_types=[StartPromptEvent])) - event_listener_3 = pipeline.add_event_listener(EventListener(mock1, event_types=[FinishPromptEvent])) - event_listener_4 = pipeline.add_event_listener(EventListener(mock2, event_types=[StartPromptEvent])) + event_listener_3 = EventBus.add_event_listener(EventListener(mock1, event_types=[FinishPromptEvent])) + event_listener_4 = EventBus.add_event_listener(EventListener(mock2, event_types=[StartPromptEvent])) - event_listener_5 = pipeline.add_event_listener(EventListener(mock2)) + event_listener_5 = EventBus.add_event_listener(EventListener(mock2)) - assert len(pipeline.event_listeners) == 4 + assert len(EventBus.event_listeners) == 4 - pipeline.remove_event_listener(event_listener_1) - pipeline.remove_event_listener(event_listener_3) - pipeline.remove_event_listener(event_listener_4) - pipeline.remove_event_listener(event_listener_5) - assert len(pipeline.event_listeners) == 0 + 
EventBus.remove_event_listener(event_listener_1) + EventBus.remove_event_listener(event_listener_3) + EventBus.remove_event_listener(event_listener_4) + EventBus.remove_event_listener(event_listener_5) + assert len(EventBus.event_listeners) == 0 def test_publish_event(self): mock_event_listener_driver = Mock() diff --git a/tests/unit/events/test_finish_actions_subtask_event.py b/tests/unit/events/test_finish_actions_subtask_event.py index 5e2a0807a..5fc35755b 100644 --- a/tests/unit/events/test_finish_actions_subtask_event.py +++ b/tests/unit/events/test_finish_actions_subtask_event.py @@ -3,7 +3,6 @@ from griptape.events import FinishActionsSubtaskEvent from griptape.structures import Agent from griptape.tasks import ActionsSubtask, ToolkitTask -from tests.mocks.mock_prompt_driver import MockPromptDriver from tests.mocks.mock_tool.tool import MockTool @@ -17,7 +16,7 @@ def finish_subtask_event(self): "Answer: test output" ) task = ToolkitTask(tools=[MockTool()]) - agent = Agent(prompt_driver=MockPromptDriver()) + agent = Agent() agent.add_task(task) subtask = ActionsSubtask(valid_input) task.add_subtask(subtask) diff --git a/tests/unit/events/test_finish_task_event.py b/tests/unit/events/test_finish_task_event.py index df1d6d42a..2568752bb 100644 --- a/tests/unit/events/test_finish_task_event.py +++ b/tests/unit/events/test_finish_task_event.py @@ -3,14 +3,13 @@ from griptape.events import FinishTaskEvent from griptape.structures import Agent from griptape.tasks import PromptTask -from tests.mocks.mock_prompt_driver import MockPromptDriver class TestFinishTaskEvent: @pytest.fixture() def finish_task_event(self): task = PromptTask() - agent = Agent(prompt_driver=MockPromptDriver()) + agent = Agent() agent.add_task(task) agent.run() diff --git a/tests/unit/events/test_start_actions_subtask_event.py b/tests/unit/events/test_start_actions_subtask_event.py index 8b628057c..b7236911f 100644 --- a/tests/unit/events/test_start_actions_subtask_event.py +++ b/tests/unit/events/test_start_actions_subtask_event.py @@ -3,7 +3,6 @@ from griptape.events import StartActionsSubtaskEvent from griptape.structures import Agent from griptape.tasks import ActionsSubtask, ToolkitTask -from tests.mocks.mock_prompt_driver import MockPromptDriver from tests.mocks.mock_tool.tool import MockTool @@ -17,7 +16,7 @@ def start_subtask_event(self): "Answer: test output" ) task = ToolkitTask(tools=[MockTool()]) - agent = Agent(prompt_driver=MockPromptDriver()) + agent = Agent() agent.add_task(task) subtask = ActionsSubtask(valid_input) task.add_subtask(subtask) diff --git a/tests/unit/events/test_start_task_event.py b/tests/unit/events/test_start_task_event.py index ea027f147..111d35934 100644 --- a/tests/unit/events/test_start_task_event.py +++ b/tests/unit/events/test_start_task_event.py @@ -3,14 +3,13 @@ from griptape.events import StartTaskEvent from griptape.structures import Agent from griptape.tasks import PromptTask -from tests.mocks.mock_prompt_driver import MockPromptDriver class TestStartTaskEvent: @pytest.fixture() def start_task_event(self): task = PromptTask() - agent = Agent(prompt_driver=MockPromptDriver()) + agent = Agent() agent.add_task(task) agent.run() diff --git a/tests/unit/memory/structure/test_conversation_memory.py b/tests/unit/memory/structure/test_conversation_memory.py index 2ffd7b8cb..3f9ac2344 100644 --- a/tests/unit/memory/structure/test_conversation_memory.py +++ b/tests/unit/memory/structure/test_conversation_memory.py @@ -60,7 +60,7 @@ def test_from_json(self): def test_buffering(self): 
memory = ConversationMemory(max_runs=2) - pipeline = Pipeline(conversation_memory=memory, prompt_driver=MockPromptDriver()) + pipeline = Pipeline(conversation_memory=memory) pipeline.add_tasks(PromptTask()) @@ -75,7 +75,7 @@ def test_buffering(self): assert pipeline.conversation_memory.runs[1].input.value == "run5" def test_add_to_prompt_stack_autopruing_disabled(self): - agent = Agent(prompt_driver=MockPromptDriver()) + agent = Agent() memory = ConversationMemory( autoprune=False, runs=[ @@ -94,9 +94,13 @@ def test_add_to_prompt_stack_autopruing_disabled(self): assert len(prompt_stack.messages) == 12 - def test_add_to_prompt_stack_autopruning_enabled(self): + def test_add_to_prompt_stack_autopruning_enabled(self, mock_config): # All memory is pruned. - agent = Agent(prompt_driver=MockPromptDriver(tokenizer=MockTokenizer(model="foo", max_input_tokens=0))) + + mock_config.drivers_config.prompt_driver = MockPromptDriver( + tokenizer=MockTokenizer(model="foo", max_input_tokens=0) + ) + agent = Agent() memory = ConversationMemory( autoprune=True, runs=[ @@ -117,7 +121,10 @@ def test_add_to_prompt_stack_autopruning_enabled(self): assert len(prompt_stack.messages) == 3 # No memory is pruned. - agent = Agent(prompt_driver=MockPromptDriver(tokenizer=MockTokenizer(model="foo", max_input_tokens=1000))) + mock_config.drivers_config.prompt_driver = MockPromptDriver( + tokenizer=MockTokenizer(model="foo", max_input_tokens=1000) + ) + agent = Agent() memory = ConversationMemory( autoprune=True, runs=[ @@ -140,7 +147,10 @@ def test_add_to_prompt_stack_autopruning_enabled(self): # One memory is pruned. # MockTokenizer's max_input_tokens set to one below the sum of memory + system prompt tokens # so that a single memory is pruned. - agent = Agent(prompt_driver=MockPromptDriver(tokenizer=MockTokenizer(model="foo", max_input_tokens=160))) + mock_config.drivers_config.prompt_driver = MockPromptDriver( + tokenizer=MockTokenizer(model="foo", max_input_tokens=160) + ) + agent = Agent() memory = ConversationMemory( autoprune=True, runs=[ diff --git a/tests/unit/memory/structure/test_summary_conversation_memory.py b/tests/unit/memory/structure/test_summary_conversation_memory.py index 4396c7b23..42246e349 100644 --- a/tests/unit/memory/structure/test_summary_conversation_memory.py +++ b/tests/unit/memory/structure/test_summary_conversation_memory.py @@ -5,14 +5,13 @@ from griptape.structures import Pipeline from griptape.tasks import PromptTask from tests.mocks.mock_prompt_driver import MockPromptDriver -from tests.mocks.mock_structure_config import MockStructureConfig class TestSummaryConversationMemory: def test_unsummarized_subtasks(self): - memory = SummaryConversationMemory(offset=1, prompt_driver=MockPromptDriver()) + memory = SummaryConversationMemory(offset=1) - pipeline = Pipeline(conversation_memory=memory, prompt_driver=MockPromptDriver()) + pipeline = Pipeline(conversation_memory=memory) pipeline.add_tasks(PromptTask("test")) @@ -24,9 +23,9 @@ def test_unsummarized_subtasks(self): assert len(memory.unsummarized_runs()) == 1 def test_after_run(self): - memory = SummaryConversationMemory(offset=1, prompt_driver=MockPromptDriver()) + memory = SummaryConversationMemory(offset=1) - pipeline = Pipeline(conversation_memory=memory, prompt_driver=MockPromptDriver()) + pipeline = Pipeline(conversation_memory=memory) pipeline.add_tasks(PromptTask("test")) @@ -85,7 +84,7 @@ def test_from_json(self): def test_config_prompt_driver(self): memory = SummaryConversationMemory() - pipeline = 
Pipeline(conversation_memory=memory, config=MockStructureConfig()) + pipeline = Pipeline(conversation_memory=memory) pipeline.add_tasks(PromptTask("test")) diff --git a/tests/unit/memory/tool/storage/test_blob_artifact_storage.py b/tests/unit/memory/tool/storage/test_blob_artifact_storage.py index c7f2cfcbd..78d1c662d 100644 --- a/tests/unit/memory/tool/storage/test_blob_artifact_storage.py +++ b/tests/unit/memory/tool/storage/test_blob_artifact_storage.py @@ -25,13 +25,3 @@ def test_load_artifacts(self, storage): def test_can_store(self, storage): assert not storage.can_store(TextArtifact("foo")) assert storage.can_store(BlobArtifact(b"foo")) - - def test_summarize(self, storage): - storage.store_artifact("foo", BlobArtifact(b"test")) - - assert storage.summarize("foo").value == "can't summarize artifacts" - - def test_query(self, storage): - storage.store_artifact("foo", BlobArtifact(b"test")) - - assert storage.query("foo", "query").value == "can't query artifacts" diff --git a/tests/unit/memory/tool/storage/test_text_artifact_storage.py b/tests/unit/memory/tool/storage/test_text_artifact_storage.py index 64f44c581..2f49421d4 100644 --- a/tests/unit/memory/tool/storage/test_text_artifact_storage.py +++ b/tests/unit/memory/tool/storage/test_text_artifact_storage.py @@ -25,13 +25,3 @@ def test_load_artifacts(self, storage): def test_can_store(self, storage): assert storage.can_store(TextArtifact("foo")) assert not storage.can_store(BlobArtifact(b"foo")) - - def test_summarize(self, storage): - storage.store_artifact("foo", TextArtifact("test")) - - assert storage.summarize("foo").value == "mock output" - - def test_query(self, storage): - storage.store_artifact("foo", TextArtifact("test")) - - assert storage.query("foo", "query").value == "mock output" diff --git a/tests/unit/memory/tool/test_task_memory.py b/tests/unit/memory/tool/test_task_memory.py index 53e4703a6..2f6ffe1c9 100644 --- a/tests/unit/memory/tool/test_task_memory.py +++ b/tests/unit/memory/tool/test_task_memory.py @@ -96,13 +96,3 @@ def test_load_artifacts_for_blob_list_artifact(self, memory): ) assert len(memory.load_artifacts("test")) == 2 - - def test_summarize_namespace(self, memory): - memory.store_artifact("foo", TextArtifact("test")) - - assert memory.summarize_namespace("foo").value == "mock output" - - def test_query_namespace(self, memory): - memory.store_artifact("foo", TextArtifact("test")) - - assert memory.query_namespace("foo", "query").value == "mock output" diff --git a/tests/unit/mixins/test_activity_mixin.py b/tests/unit/mixins/test_activity_mixin.py index f31f9f0e8..1d684e2a5 100644 --- a/tests/unit/mixins/test_activity_mixin.py +++ b/tests/unit/mixins/test_activity_mixin.py @@ -20,7 +20,7 @@ def test_activity_description(self, tool): def test_activity_schema(self, tool): schema = tool.activity_schema(tool.test).json_schema("InputSchema") - assert schema == Schema({"values": tool.test.config["schema"].schema}).json_schema("InputSchema") + assert schema == Schema({"values": getattr(tool.test, "config")["schema"]}).json_schema("InputSchema") assert schema["properties"].get("artifact") is None def test_activity_with_no_schema(self, tool): @@ -73,11 +73,31 @@ def test_enable_activities(self, tool): assert len(tool.activities()) > 0 - def test_activity_to_input(self, tool): - activity_input = tool.activity_to_input(tool.test) - assert str(activity_input) == str( - {Literal("input", description=""): {"values": Schema({Literal("test"): str}, description="Test input")}} + def 
test_extra_schema_properties(self): + tool = MockTool( + test_field="hello", + test_int=5, + extra_schema_properties={"test": {Literal("new_property"): str, Optional("optional_property"): int}}, ) + schema = tool.activity_schema(tool.test).json_schema("InputSchema") - activity_input = tool.activity_to_input(tool.test_no_schema) - assert activity_input == {Optional("input"): {}} + assert schema == { + "$id": "InputSchema", + "$schema": "http://json-schema.org/draft-07/schema#", + "properties": { + "values": { + "description": "Test input", + "properties": { + "test": {"type": "string"}, + "new_property": {"type": "string"}, + "optional_property": {"type": "integer"}, + }, + "required": ["test", "new_property"], + "additionalProperties": False, + "type": "object", + } + }, + "required": ["values"], + "additionalProperties": False, + "type": "object", + } diff --git a/tests/unit/mixins/test_events_mixin.py b/tests/unit/mixins/test_events_mixin.py deleted file mode 100644 index 99f5541ba..000000000 --- a/tests/unit/mixins/test_events_mixin.py +++ /dev/null @@ -1,59 +0,0 @@ -from unittest.mock import Mock - -from griptape.events import EventListener -from griptape.mixins import EventPublisherMixin -from tests.mocks.mock_event import MockEvent - - -class TestEventsMixin: - def test_init(self): - assert EventPublisherMixin() - - def test_add_event_listeners(self): - mixin = EventPublisherMixin() - - mixin.add_event_listeners([EventListener(), EventListener()]) - assert len(mixin.event_listeners) == 2 - - def test_remove_event_listeners(self): - mixin = EventPublisherMixin() - - listeners = [EventListener(), EventListener()] - mixin.add_event_listeners(listeners) - mixin.remove_event_listeners(listeners) - assert len(mixin.event_listeners) == 0 - - def test_add_event_listener(self): - mixin = EventPublisherMixin() - - mixin.add_event_listener(EventListener()) - mixin.add_event_listener(EventListener()) - - assert len(mixin.event_listeners) == 2 - - def test_remove_event_listener(self): - mixin = EventPublisherMixin() - - listener = EventListener() - mixin.add_event_listener(listener) - mixin.remove_event_listener(listener) - - assert len(mixin.event_listeners) == 0 - - def test_remove_unknown_event_listener(self): - mixin = EventPublisherMixin() - - mixin.remove_event_listener(EventListener()) - - def test_publish_event(self): - # Given - mock_handler = Mock() - mock_handler.return_value = None - mixin = EventPublisherMixin(event_listeners=[EventListener(handler=mock_handler)]) - mock_event = MockEvent() - - # When - mixin.publish_event(mock_event) - - # Then - mock_handler.assert_called_once_with(mock_event) diff --git a/tests/unit/mixins/test_futures_executor_mixin.py b/tests/unit/mixins/test_futures_executor_mixin.py new file mode 100644 index 000000000..3be336687 --- /dev/null +++ b/tests/unit/mixins/test_futures_executor_mixin.py @@ -0,0 +1,10 @@ +from concurrent import futures + +from tests.mocks.mock_futures_executor import MockFuturesExecutor + + +class TestFuturesExecutorMixin: + def test_futures_executor(self): + executor = futures.ThreadPoolExecutor() + + assert MockFuturesExecutor(futures_executor_fn=lambda: executor).futures_executor == executor diff --git a/tests/unit/structures/test_agent.py b/tests/unit/structures/test_agent.py index a09ad0f9a..235363bbe 100644 --- a/tests/unit/structures/test_agent.py +++ b/tests/unit/structures/test_agent.py @@ -1,13 +1,11 @@ import pytest -from griptape.engines import PromptSummaryEngine from griptape.memory import TaskMemory from 
griptape.memory.structure import ConversationMemory from griptape.memory.task.storage import TextArtifactStorage from griptape.rules import Rule, Ruleset from griptape.structures import Agent from griptape.tasks import BaseTask, PromptTask, ToolkitTask -from tests.mocks.mock_embedding_driver import MockEmbeddingDriver from tests.mocks.mock_prompt_driver import MockPromptDriver from tests.mocks.mock_tool.tool import MockTool @@ -76,18 +74,6 @@ def test_with_no_task_memory_and_empty_tool_output_memory(self): assert agent.tools[0].input_memory[0] == agent.task_memory assert agent.tools[0].output_memory == {} - def test_embedding_driver(self): - embedding_driver = MockEmbeddingDriver() - agent = Agent(tools=[MockTool()], embedding_driver=embedding_driver) - - storage = list(agent.task_memory.artifact_storages.values())[0] - assert isinstance(storage, TextArtifactStorage) - memory_embedding_driver = storage.rag_engine.retrieval_stage.retrieval_modules[ - 0 - ].vector_store_driver.embedding_driver - - assert memory_embedding_driver == embedding_driver - def test_without_default_task_memory(self): agent = Agent(task_memory=None, tools=[MockTool()]) @@ -230,33 +216,13 @@ def test_context(self): assert context["structure"] == agent - def test_task_memory_defaults(self): - prompt_driver = MockPromptDriver() - embedding_driver = MockEmbeddingDriver() - agent = Agent(prompt_driver=prompt_driver, embedding_driver=embedding_driver) + def test_task_memory_defaults(self, mock_config): + agent = Agent() storage = list(agent.task_memory.artifact_storages.values())[0] assert isinstance(storage, TextArtifactStorage) - assert storage.rag_engine.response_stage.response_module.prompt_driver == prompt_driver - assert ( - storage.rag_engine.retrieval_stage.retrieval_modules[0].vector_store_driver.embedding_driver - == embedding_driver - ) - assert isinstance(storage.summary_engine, PromptSummaryEngine) - assert storage.summary_engine.prompt_driver == prompt_driver - assert storage.csv_extraction_engine.prompt_driver == prompt_driver - assert storage.json_extraction_engine.prompt_driver == prompt_driver - - def test_deprecation(self): - with pytest.deprecated_call(): - Agent(prompt_driver=MockPromptDriver()) - - with pytest.deprecated_call(): - Agent(embedding_driver=MockEmbeddingDriver()) - - with pytest.deprecated_call(): - Agent(stream=True) + assert storage.vector_store_driver.embedding_driver == mock_config.drivers_config.embedding_driver def finished_tasks(self): task = PromptTask("test prompt") diff --git a/tests/unit/structures/test_pipeline.py b/tests/unit/structures/test_pipeline.py index 38f8abfb3..a7f7f40c1 100644 --- a/tests/unit/structures/test_pipeline.py +++ b/tests/unit/structures/test_pipeline.py @@ -4,14 +4,11 @@ from griptape.artifacts import ErrorArtifact, TextArtifact from griptape.memory.structure import ConversationMemory -from griptape.memory.task.storage import TextArtifactStorage from griptape.rules import Rule, Ruleset from griptape.structures import Pipeline from griptape.tasks import BaseTask, CodeExecutionTask, PromptTask, ToolkitTask from griptape.tokenizers import OpenAiTokenizer -from tests.mocks.mock_prompt_driver import MockPromptDriver from tests.mocks.mock_tool.tool import MockTool -from tests.unit.structures.test_agent import MockEmbeddingDriver class TestPipeline: @@ -31,10 +28,8 @@ def fn(task): return CodeExecutionTask(run_fn=fn) def test_init(self): - driver = MockPromptDriver() - pipeline = Pipeline(prompt_driver=driver, rulesets=[Ruleset("TestRuleset", [Rule("test")])]) 
+ pipeline = Pipeline(rulesets=[Ruleset("TestRuleset", [Rule("test")])]) - assert pipeline.prompt_driver is driver assert pipeline.input_task is None assert pipeline.output_task is None assert pipeline.rulesets[0].name == "TestRuleset" @@ -103,20 +98,6 @@ def test_with_task_memory(self): assert pipeline.tasks[0].tools[0].output_memory is not None assert pipeline.tasks[0].tools[0].output_memory["test"][0] == pipeline.task_memory - def test_embedding_driver(self): - embedding_driver = MockEmbeddingDriver() - pipeline = Pipeline(embedding_driver=embedding_driver) - - pipeline.add_task(ToolkitTask(tools=[MockTool()])) - - storage = list(pipeline.task_memory.artifact_storages.values())[0] - assert isinstance(storage, TextArtifactStorage) - memory_embedding_driver = storage.rag_engine.retrieval_stage.retrieval_modules[ - 0 - ].vector_store_driver.embedding_driver - - assert memory_embedding_driver == embedding_driver - def test_with_task_memory_and_empty_tool_output_memory(self): pipeline = Pipeline() @@ -139,7 +120,7 @@ def test_with_memory(self): second_task = PromptTask("test2") third_task = PromptTask("test3") - pipeline = Pipeline(prompt_driver=MockPromptDriver(), conversation_memory=ConversationMemory()) + pipeline = Pipeline(conversation_memory=ConversationMemory()) pipeline + [first_task, second_task, third_task] @@ -174,7 +155,7 @@ def test_tasks_order(self): second_task = PromptTask("test2") third_task = PromptTask("test3") - pipeline = Pipeline(prompt_driver=MockPromptDriver()) + pipeline = Pipeline() pipeline + first_task pipeline + second_task @@ -189,7 +170,7 @@ def test_add_task(self): first_task = PromptTask("test1") second_task = PromptTask("test2") - pipeline = Pipeline(prompt_driver=MockPromptDriver()) + pipeline = Pipeline() pipeline + first_task pipeline + second_task @@ -208,7 +189,7 @@ def test_add_tasks(self): first_task = PromptTask("test1") second_task = PromptTask("test2") - pipeline = Pipeline(prompt_driver=MockPromptDriver()) + pipeline = Pipeline() pipeline + [first_task, second_task] @@ -227,7 +208,7 @@ def test_insert_task_in_middle(self): second_task = PromptTask("test2", id="test2") third_task = PromptTask("test3", id="test3") - pipeline = Pipeline(prompt_driver=MockPromptDriver()) + pipeline = Pipeline() pipeline + [first_task, second_task] pipeline.insert_task(first_task, third_task) @@ -251,7 +232,7 @@ def test_insert_task_at_end(self): second_task = PromptTask("test2", id="test2") third_task = PromptTask("test3", id="test3") - pipeline = Pipeline(prompt_driver=MockPromptDriver()) + pipeline = Pipeline() pipeline + [first_task, second_task] pipeline.insert_task(second_task, third_task) @@ -271,7 +252,7 @@ def test_insert_task_at_end(self): assert [child.id for child in third_task.children] == [] def test_prompt_stack_without_memory(self): - pipeline = Pipeline(conversation_memory=None, prompt_driver=MockPromptDriver(), rules=[Rule("test")]) + pipeline = Pipeline(conversation_memory=None, rules=[Rule("test")]) task1 = PromptTask("test") task2 = PromptTask("test") @@ -292,7 +273,7 @@ def test_prompt_stack_without_memory(self): assert len(task2.prompt_stack.messages) == 3 def test_prompt_stack_with_memory(self): - pipeline = Pipeline(prompt_driver=MockPromptDriver(), rules=[Rule("test")]) + pipeline = Pipeline(rules=[Rule("test")]) task1 = PromptTask("test") task2 = PromptTask("test") @@ -321,7 +302,7 @@ def test_text_artifact_token_count(self): def test_run(self): task = PromptTask("test") - pipeline = Pipeline(prompt_driver=MockPromptDriver()) + pipeline = 
Pipeline() pipeline + task assert task.state == BaseTask.State.PENDING @@ -333,7 +314,7 @@ def test_run(self): def test_run_with_args(self): task = PromptTask("{{ args[0] }}-{{ args[1] }}") - pipeline = Pipeline(prompt_driver=MockPromptDriver()) + pipeline = Pipeline() pipeline + [task] pipeline._execution_args = ("test1", "test2") @@ -348,7 +329,7 @@ def test_context(self): parent = PromptTask("parent") task = PromptTask("test") child = PromptTask("child") - pipeline = Pipeline(prompt_driver=MockPromptDriver()) + pipeline = Pipeline() pipeline + [parent, task, child] @@ -365,28 +346,35 @@ def test_context(self): assert context["parent"] == parent assert context["child"] == child - def test_deprecation(self): - with pytest.deprecated_call(): - Pipeline(prompt_driver=MockPromptDriver()) - - with pytest.deprecated_call(): - Pipeline(embedding_driver=MockEmbeddingDriver()) - - with pytest.deprecated_call(): - Pipeline(stream=True) - def test_run_with_error_artifact(self, error_artifact_task, waiting_task): end_task = PromptTask("end") - pipeline = Pipeline(prompt_driver=MockPromptDriver(), tasks=[waiting_task, error_artifact_task, end_task]) + pipeline = Pipeline(tasks=[waiting_task, error_artifact_task, end_task]) pipeline.run() assert pipeline.output is None def test_run_with_error_artifact_no_fail_fast(self, error_artifact_task, waiting_task): end_task = PromptTask("end") - pipeline = Pipeline( - prompt_driver=MockPromptDriver(), tasks=[waiting_task, error_artifact_task, end_task], fail_fast=False - ) + pipeline = Pipeline(tasks=[waiting_task, error_artifact_task, end_task], fail_fast=False) pipeline.run() assert pipeline.output is not None + + def test_add_duplicate_task(self): + task = PromptTask("test") + pipeline = Pipeline() + + pipeline + task + pipeline + task + + assert len(pipeline.tasks) == 1 + + def test_add_duplicate_task_directly(self): + task = PromptTask("test") + pipeline = Pipeline() + + pipeline + task + pipeline.tasks.append(task) + + with pytest.raises(ValueError, match=f"Duplicate task with id {task.id} found."): + pipeline.run() diff --git a/tests/unit/structures/test_workflow.py b/tests/unit/structures/test_workflow.py index 2be164ea7..45610bf20 100644 --- a/tests/unit/structures/test_workflow.py +++ b/tests/unit/structures/test_workflow.py @@ -4,12 +4,9 @@ from griptape.artifacts import ErrorArtifact, TextArtifact from griptape.memory.structure import ConversationMemory -from griptape.memory.task.storage import TextArtifactStorage from griptape.rules import Rule, Ruleset from griptape.structures import Workflow from griptape.tasks import BaseTask, CodeExecutionTask, PromptTask, ToolkitTask -from tests.mocks.mock_embedding_driver import MockEmbeddingDriver -from tests.mocks.mock_prompt_driver import MockPromptDriver from tests.mocks.mock_tool.tool import MockTool @@ -30,10 +27,8 @@ def fn(task): return CodeExecutionTask(run_fn=fn) def test_init(self): - driver = MockPromptDriver() - workflow = Workflow(prompt_driver=driver, rulesets=[Ruleset("TestRuleset", [Rule("test")])]) + workflow = Workflow(rulesets=[Ruleset("TestRuleset", [Rule("test")])]) - assert workflow.prompt_driver is driver assert len(workflow.tasks) == 0 assert workflow.rulesets[0].name == "TestRuleset" assert workflow.rulesets[0].rules[0].value == "test" @@ -100,20 +95,6 @@ def test_with_task_memory(self): assert workflow.tasks[0].tools[0].output_memory is not None assert workflow.tasks[0].tools[0].output_memory["test"][0] == workflow.task_memory - def test_embedding_driver(self): - embedding_driver = 
MockEmbeddingDriver() - workflow = Workflow(embedding_driver=embedding_driver) - - workflow.add_task(ToolkitTask(tools=[MockTool()])) - - storage = list(workflow.task_memory.artifact_storages.values())[0] - assert isinstance(storage, TextArtifactStorage) - memory_embedding_driver = storage.rag_engine.retrieval_stage.retrieval_modules[ - 0 - ].vector_store_driver.embedding_driver - - assert memory_embedding_driver == embedding_driver - def test_with_task_memory_and_empty_tool_output_memory(self): workflow = Workflow() @@ -136,7 +117,7 @@ def test_with_memory(self): second_task = PromptTask("test2") third_task = PromptTask("test3") - workflow = Workflow(prompt_driver=MockPromptDriver(), conversation_memory=ConversationMemory()) + workflow = Workflow(conversation_memory=ConversationMemory()) workflow + [first_task, second_task, third_task] @@ -170,7 +151,7 @@ def test_add_task(self): first_task = PromptTask("test1") second_task = PromptTask("test2") - workflow = Workflow(prompt_driver=MockPromptDriver()) + workflow = Workflow() workflow + first_task workflow.add_task(second_task) @@ -189,7 +170,7 @@ def test_add_tasks(self): first_task = PromptTask("test1") second_task = PromptTask("test2") - workflow = Workflow(prompt_driver=MockPromptDriver()) + workflow = Workflow() workflow + [first_task, second_task] @@ -206,7 +187,7 @@ def test_add_tasks(self): def test_run(self): task1 = PromptTask("test") task2 = PromptTask("test") - workflow = Workflow(prompt_driver=MockPromptDriver()) + workflow = Workflow() workflow + [task1, task2] assert task1.state == BaseTask.State.PENDING @@ -219,7 +200,7 @@ def test_run(self): def test_run_with_args(self): task = PromptTask("{{ args[0] }}-{{ args[1] }}") - workflow = Workflow(prompt_driver=MockPromptDriver()) + workflow = Workflow() workflow + task workflow._execution_args = ("test1", "test2") @@ -241,7 +222,7 @@ def test_run_with_args(self): ], ) def test_run_raises_on_missing_parent_or_child_id(self, tasks): - workflow = Workflow(prompt_driver=MockPromptDriver(), tasks=tasks) + workflow = Workflow(tasks=tasks) with pytest.raises(ValueError) as e: workflow.run() @@ -250,7 +231,6 @@ def test_run_raises_on_missing_parent_or_child_id(self, tasks): def test_run_topology_1_declarative_parents(self): workflow = Workflow( - prompt_driver=MockPromptDriver(), tasks=[ PromptTask("test1", id="task1"), PromptTask("test2", id="task2", parent_ids=["task1"]), @@ -265,7 +245,6 @@ def test_run_topology_1_declarative_parents(self): def test_run_topology_1_declarative_children(self): workflow = Workflow( - prompt_driver=MockPromptDriver(), tasks=[ PromptTask("test1", id="task1", child_ids=["task2", "task3"]), PromptTask("test2", id="task2", child_ids=["task4"]), @@ -280,7 +259,6 @@ def test_run_topology_1_declarative_children(self): def test_run_topology_1_declarative_mixed(self): workflow = Workflow( - prompt_driver=MockPromptDriver(), tasks=[ PromptTask("test1", id="task1", child_ids=["task3"]), PromptTask("test2", id="task2", parent_ids=["task1"], child_ids=["task4"]), @@ -299,9 +277,9 @@ def test_run_topology_1_imperative_parents(self): task3 = PromptTask("test3", id="task3") task4 = PromptTask("test4", id="task4") task2.add_parent(task1) - task3.add_parent("task1") - task4.add_parents([task2, "task3"]) - workflow = Workflow(prompt_driver=MockPromptDriver(), tasks=[task1, task2, task3, task4]) + task3.add_parent(task1) + task4.add_parents([task2, task3]) + workflow = Workflow(tasks=[task1, task2, task3, task4]) workflow.run() @@ -315,7 +293,35 @@ def 
test_run_topology_1_imperative_children(self): task1.add_children([task2, task3]) task2.add_child(task4) task3.add_child(task4) - workflow = Workflow(prompt_driver=MockPromptDriver(), tasks=[task1, task2, task3, task4]) + workflow = Workflow(tasks=[task1, task2, task3, task4]) + + workflow.run() + + self._validate_topology_1(workflow) + + def test_run_topology_1_imperative_parents_structure_init(self): + workflow = Workflow() + task1 = PromptTask("test1", id="task1") + task2 = PromptTask("test2", id="task2", structure=workflow) + task3 = PromptTask("test3", id="task3", structure=workflow) + task4 = PromptTask("test4", id="task4", structure=workflow) + task2.add_parent(task1) + task3.add_parent(task1) + task4.add_parents([task2, task3]) + + workflow.run() + + self._validate_topology_1(workflow) + + def test_run_topology_1_imperative_children_structure_init(self): + workflow = Workflow() + task1 = PromptTask("test1", id="task1", structure=workflow) + task2 = PromptTask("test2", id="task2", structure=workflow) + task3 = PromptTask("test3", id="task3", structure=workflow) + task4 = PromptTask("test4", id="task4") + task1.add_children([task2, task3]) + task2.add_child(task4) + task3.add_child(task4) workflow.run() @@ -328,7 +334,7 @@ def test_run_topology_1_imperative_mixed(self): task4 = PromptTask("test4", id="task4") task1.add_children([task2, task3]) task4.add_parents([task2, task3]) - workflow = Workflow(prompt_driver=MockPromptDriver(), tasks=[task1, task2, task3, task4]) + workflow = Workflow(tasks=[task1, task2, task3, task4]) workflow.run() @@ -339,7 +345,7 @@ def test_run_topology_1_imperative_insert(self): task2 = PromptTask("test2", id="task2") task3 = PromptTask("test3", id="task3") task4 = PromptTask("test4", id="task4") - workflow = Workflow(prompt_driver=MockPromptDriver()) + workflow = Workflow() # task1 splits into task2 and task3 # task2 and task3 converge into task4 @@ -356,7 +362,7 @@ def test_run_topology_1_missing_parent(self): task2 = PromptTask("test2", id="task2") task3 = PromptTask("test3", id="task3") task4 = PromptTask("test4", id="task4") - workflow = Workflow(prompt_driver=MockPromptDriver()) + workflow = Workflow() # task1 never added to workflow workflow + task4 @@ -368,7 +374,7 @@ def test_run_topology_1_id_equality(self): task2 = PromptTask("test2", id="task2") task3 = PromptTask("test3", id="task3") task4 = PromptTask("test4", id="task4") - workflow = Workflow(prompt_driver=MockPromptDriver()) + workflow = Workflow() # task4 never added to workflow workflow + task1 @@ -382,7 +388,7 @@ def test_run_topology_1_object_equality(self): task2 = PromptTask("test2", id="task2") task3 = PromptTask("test3", id="task3") task4 = PromptTask("test4", id="task4") - workflow = Workflow(prompt_driver=MockPromptDriver()) + workflow = Workflow() workflow + task1 workflow + task4 @@ -391,7 +397,6 @@ def test_run_topology_1_object_equality(self): def test_run_topology_2_declarative_parents(self): workflow = Workflow( - prompt_driver=MockPromptDriver(), tasks=[ PromptTask("testa", id="taska"), PromptTask("testb", id="taskb", parent_ids=["taska"]), @@ -407,7 +412,6 @@ def test_run_topology_2_declarative_parents(self): def test_run_topology_2_declarative_children(self): workflow = Workflow( - prompt_driver=MockPromptDriver(), tasks=[ PromptTask("testa", id="taska", child_ids=["taskb", "taskc", "taskd", "taske"]), PromptTask("testb", id="taskb", child_ids=["taskd"]), @@ -428,10 +432,10 @@ def test_run_topology_2_imperative_parents(self): taskd = PromptTask("testd", id="taskd") taske 
= PromptTask("teste", id="taske") taskb.add_parent(taska) - taskc.add_parent("taska") + taskc.add_parent(taska) taskd.add_parents([taska, taskb, taskc]) - taske.add_parents(["taska", taskd, "taskc"]) - workflow = Workflow(prompt_driver=MockPromptDriver(), tasks=[taska, taskb, taskc, taskd, taske]) + taske.add_parents([taska, taskd, taskc]) + workflow = Workflow(tasks=[taska, taskb, taskc, taskd, taske]) workflow.run() @@ -447,7 +451,7 @@ def test_run_topology_2_imperative_children(self): taskb.add_child(taskd) taskc.add_children([taskd, taske]) taskd.add_child(taske) - workflow = Workflow(prompt_driver=MockPromptDriver(), tasks=[taska, taskb, taskc, taskd, taske]) + workflow = Workflow(tasks=[taska, taskb, taskc, taskd, taske]) workflow.run() @@ -462,8 +466,8 @@ def test_run_topology_2_imperative_mixed(self): taska.add_children([taskb, taskc, taskd, taske]) taskb.add_child(taskd) taskd.add_parent(taskc) - taske.add_parents(["taska", taskd, "taskc"]) - workflow = Workflow(prompt_driver=MockPromptDriver(), tasks=[taska, taskb, taskc, taskd, taske]) + taske.add_parents([taska, taskd, taskc]) + workflow = Workflow(tasks=[taska, taskb, taskc, taskd, taske]) workflow.run() @@ -475,7 +479,7 @@ def test_run_topology_2_imperative_insert(self): taskc = PromptTask("testc", id="taskc") taskd = PromptTask("testd", id="taskd") taske = PromptTask("teste", id="taske") - workflow = Workflow(prompt_driver=MockPromptDriver()) + workflow = Workflow() workflow.add_task(taska) workflow.add_task(taske) taske.add_parent(taska) @@ -489,7 +493,6 @@ def test_run_topology_2_imperative_insert(self): def test_run_topology_3_declarative_parents(self): workflow = Workflow( - prompt_driver=MockPromptDriver(), tasks=[ PromptTask("test1", id="task1"), PromptTask("test2", id="task2", parent_ids=["task4"]), @@ -504,7 +507,6 @@ def test_run_topology_3_declarative_parents(self): def test_run_topology_3_declarative_children(self): workflow = Workflow( - prompt_driver=MockPromptDriver(), tasks=[ PromptTask("test1", id="task1", child_ids=["task4"]), PromptTask("test2", id="task2", child_ids=["task3"]), @@ -519,7 +521,6 @@ def test_run_topology_3_declarative_children(self): def test_run_topology_3_declarative_mixed(self): workflow = Workflow( - prompt_driver=MockPromptDriver(), tasks=[ PromptTask("test1", id="task1"), PromptTask("test2", id="task2", parent_ids=["task4"], child_ids=["task3"]), @@ -537,7 +538,7 @@ def test_run_topology_3_imperative_insert(self): task2 = PromptTask("test2", id="task2") task3 = PromptTask("test3", id="task3") task4 = PromptTask("test4", id="task4") - workflow = Workflow(prompt_driver=MockPromptDriver()) + workflow = Workflow() workflow + task1 workflow + task2 @@ -552,7 +553,6 @@ def test_run_topology_3_imperative_insert(self): def test_run_topology_4_declarative_parents(self): workflow = Workflow( - prompt_driver=MockPromptDriver(), tasks=[ PromptTask(id="collect_movie_info"), PromptTask(id="movie_info_1", parent_ids=["collect_movie_info"]), @@ -572,7 +572,6 @@ def test_run_topology_4_declarative_parents(self): def test_run_topology_4_declarative_children(self): workflow = Workflow( - prompt_driver=MockPromptDriver(), tasks=[ PromptTask(id="collect_movie_info", child_ids=["movie_info_1", "movie_info_2", "movie_info_3"]), PromptTask(id="movie_info_1", child_ids=["compare_movies"]), @@ -592,7 +591,6 @@ def test_run_topology_4_declarative_children(self): def test_run_topology_4_declarative_mixed(self): workflow = Workflow( - prompt_driver=MockPromptDriver(), tasks=[ PromptTask(id="collect_movie_info"), 
PromptTask(id="movie_info_1", parent_ids=["collect_movie_info"], child_ids=["compare_movies"]), @@ -622,7 +620,7 @@ def test_run_topology_4_imperative_insert(self): publish_website = PromptTask(id="publish_website") movie_info_3 = PromptTask(id="movie_info_3") - workflow = Workflow(prompt_driver=MockPromptDriver()) + workflow = Workflow() workflow.add_tasks(collect_movie_info, summarize_to_slack) workflow.insert_tasks(collect_movie_info, [movie_info_1, movie_info_2, movie_info_3], summarize_to_slack) workflow.insert_tasks([movie_info_1, movie_info_2, movie_info_3], compare_movies, summarize_to_slack) @@ -644,7 +642,7 @@ def test_run_topology_4_imperative_insert(self): ], ) def test_run_raises_on_cycle(self, tasks): - workflow = Workflow(prompt_driver=MockPromptDriver(), tasks=tasks) + workflow = Workflow(tasks=tasks) with pytest.raises(ValueError) as e: workflow.run() @@ -656,7 +654,7 @@ def test_input_task(self): task2 = PromptTask("prompt2") task3 = PromptTask("prompt3") task4 = PromptTask("prompt4") - workflow = Workflow(prompt_driver=MockPromptDriver()) + workflow = Workflow() workflow + task1 workflow + task4 @@ -669,7 +667,7 @@ def test_output_task(self): task2 = PromptTask("prompt2") task3 = PromptTask("prompt3") task4 = PromptTask("prompt4") - workflow = Workflow(prompt_driver=MockPromptDriver()) + workflow = Workflow() workflow + task1 workflow + task4 @@ -681,7 +679,7 @@ def test_output_task(self): task1.add_children([task2, task3]) # task4 is the final task, but its defined at index 0 - workflow = Workflow(prompt_driver=MockPromptDriver(), tasks=[task4, task1, task2, task3]) + workflow = Workflow(tasks=[task4, task1, task2, task3]) # output_task topologically should be task4 assert task4 == workflow.output_task @@ -691,7 +689,7 @@ def test_to_graph(self): task2 = PromptTask("prompt2", id="task2") task3 = PromptTask("prompt3", id="task3") task4 = PromptTask("prompt4", id="task4") - workflow = Workflow(prompt_driver=MockPromptDriver()) + workflow = Workflow() workflow + task1 workflow + task4 @@ -708,7 +706,7 @@ def test_order_tasks(self): task2 = PromptTask("prompt2", id="task2") task3 = PromptTask("prompt3", id="task3") task4 = PromptTask("prompt4", id="task4") - workflow = Workflow(prompt_driver=MockPromptDriver()) + workflow = Workflow() workflow + task1 workflow + task4 @@ -725,7 +723,7 @@ def test_context(self): parent = PromptTask("parent") task = PromptTask("test") child = PromptTask("child") - workflow = Workflow(prompt_driver=MockPromptDriver()) + workflow = Workflow() workflow + parent workflow + task @@ -748,20 +746,10 @@ def test_context(self): assert context["parents"] == {parent.id: parent} assert context["children"] == {child.id: child} - def test_deprecation(self): - with pytest.deprecated_call(): - Workflow(prompt_driver=MockPromptDriver()) - - with pytest.deprecated_call(): - Workflow(embedding_driver=MockEmbeddingDriver()) - - with pytest.deprecated_call(): - Workflow(stream=True) - def test_run_with_error_artifact(self, error_artifact_task, waiting_task): end_task = PromptTask("end") end_task.add_parents([error_artifact_task, waiting_task]) - workflow = Workflow(prompt_driver=MockPromptDriver(), tasks=[waiting_task, error_artifact_task, end_task]) + workflow = Workflow(tasks=[waiting_task, error_artifact_task, end_task]) workflow.run() assert workflow.output is None @@ -769,9 +757,7 @@ def test_run_with_error_artifact(self, error_artifact_task, waiting_task): def test_run_with_error_artifact_no_fail_fast(self, error_artifact_task, waiting_task): end_task = 
PromptTask("end") end_task.add_parents([error_artifact_task, waiting_task]) - workflow = Workflow( - prompt_driver=MockPromptDriver(), tasks=[waiting_task, error_artifact_task, end_task], fail_fast=False - ) + workflow = Workflow(tasks=[waiting_task, error_artifact_task, end_task], fail_fast=False) workflow.run() assert workflow.output is not None @@ -781,8 +767,8 @@ def _validate_topology_1(workflow) -> None: assert len(workflow.tasks) == 4 assert workflow.input_task.id == "task1" assert workflow.output_task.id == "task4" - assert workflow.input_task.id == workflow.tasks[0].id - assert workflow.output_task.id == workflow.tasks[-1].id + assert workflow.input_task.id == workflow.order_tasks()[0].id + assert workflow.output_task.id == workflow.order_tasks()[-1].id task1 = workflow.find_task("task1") assert task1.state == BaseTask.State.FINISHED @@ -810,8 +796,6 @@ def _validate_topology_2(workflow) -> None: assert len(workflow.tasks) == 5 assert workflow.input_task.id == "taska" assert workflow.output_task.id == "taske" - assert workflow.input_task.id == workflow.tasks[0].id - assert workflow.output_task.id == workflow.tasks[-1].id taska = workflow.find_task("taska") assert taska.state == BaseTask.State.FINISHED @@ -832,6 +816,8 @@ def _validate_topology_2(workflow) -> None: assert taskd.state == BaseTask.State.FINISHED assert sorted(taskd.parent_ids) == ["taska", "taskb", "taskc"] assert taskd.child_ids == ["taske"] + assert workflow.input_task.id == workflow.order_tasks()[0].id + assert workflow.output_task.id == workflow.order_tasks()[-1].id taske = workflow.find_task("taske") assert taske.state == BaseTask.State.FINISHED @@ -843,9 +829,6 @@ def _validate_topology_3(workflow) -> None: assert len(workflow.tasks) == 4 assert workflow.input_task.id == "task1" assert workflow.output_task.id == "task3" - assert workflow.input_task.id == workflow.tasks[0].id - assert workflow.output_task.id == workflow.tasks[-1].id - task1 = workflow.find_task("task1") assert task1.state == BaseTask.State.FINISHED assert task1.parent_ids == [] @@ -855,6 +838,8 @@ def _validate_topology_3(workflow) -> None: assert task2.state == BaseTask.State.FINISHED assert task2.parent_ids == ["task4"] assert task2.child_ids == ["task3"] + assert workflow.input_task.id == workflow.order_tasks()[0].id + assert workflow.output_task.id == workflow.order_tasks()[-1].id task3 = workflow.find_task("task3") assert task3.state == BaseTask.State.FINISHED @@ -871,8 +856,8 @@ def _validate_topology_4(workflow) -> None: assert len(workflow.tasks) == 9 assert workflow.input_task.id == "collect_movie_info" assert workflow.output_task.id == "summarize_to_slack" - assert workflow.input_task.id == workflow.tasks[0].id - assert workflow.output_task.id == workflow.tasks[-1].id + assert workflow.input_task.id == workflow.order_tasks()[0].id + assert workflow.output_task.id == workflow.order_tasks()[-1].id collect_movie_info = workflow.find_task("collect_movie_info") assert collect_movie_info.parent_ids == [] diff --git a/tests/unit/tasks/test_audio_transcription_task.py b/tests/unit/tasks/test_audio_transcription_task.py index 734e111cf..33405ad10 100644 --- a/tests/unit/tasks/test_audio_transcription_task.py +++ b/tests/unit/tasks/test_audio_transcription_task.py @@ -6,8 +6,6 @@ from griptape.engines import AudioTranscriptionEngine from griptape.structures import Agent, Pipeline from griptape.tasks import AudioTranscriptionTask, BaseTask -from tests.mocks.mock_prompt_driver import MockPromptDriver -from tests.mocks.mock_structure_config import 
MockStructureConfig class TestAudioTranscriptionTask: @@ -34,7 +32,7 @@ def callable_input(task: BaseTask) -> AudioArtifact: def test_config_audio_transcription_engine(self, audio_artifact): task = AudioTranscriptionTask(audio_artifact) - Agent(config=MockStructureConfig()).add_task(task) + Agent().add_task(task) assert isinstance(task.audio_transcription_engine, AudioTranscriptionEngine) @@ -42,7 +40,7 @@ def test_run(self, audio_artifact, audio_transcription_engine): audio_transcription_engine.run.return_value = TextArtifact("mock transcription") task = AudioTranscriptionTask(audio_artifact, audio_transcription_engine=audio_transcription_engine) - pipeline = Pipeline(prompt_driver=MockPromptDriver()) + pipeline = Pipeline() pipeline.add_task(task) assert pipeline.run().output.to_text() == "mock transcription" diff --git a/tests/unit/tasks/test_base_multi_text_input_task.py b/tests/unit/tasks/test_base_multi_text_input_task.py index 3d8d67a55..8eaa832ae 100644 --- a/tests/unit/tasks/test_base_multi_text_input_task.py +++ b/tests/unit/tasks/test_base_multi_text_input_task.py @@ -1,7 +1,6 @@ from griptape.artifacts import TextArtifact from griptape.structures import Pipeline from tests.mocks.mock_multi_text_input_task import MockMultiTextInputTask -from tests.mocks.mock_prompt_driver import MockPromptDriver class TestBaseMultiTextInputTask: @@ -42,7 +41,7 @@ def test_full_context(self): parent = MockMultiTextInputTask(("parent1", "parent2")) subtask = MockMultiTextInputTask(("test1", "test2"), context={"foo": "bar"}) child = MockMultiTextInputTask(("child2", "child2")) - pipeline = Pipeline(prompt_driver=MockPromptDriver()) + pipeline = Pipeline() pipeline.add_tasks(parent, subtask, child) diff --git a/tests/unit/tasks/test_base_task.py b/tests/unit/tasks/test_base_task.py index 87602dbbb..94a13b938 100644 --- a/tests/unit/tasks/test_base_task.py +++ b/tests/unit/tasks/test_base_task.py @@ -3,11 +3,10 @@ import pytest from griptape.artifacts import TextArtifact +from griptape.events import EventBus from griptape.events.event_listener import EventListener from griptape.structures import Agent, Workflow from griptape.tasks import ActionsSubtask -from tests.mocks.mock_embedding_driver import MockEmbeddingDriver -from tests.mocks.mock_prompt_driver import MockPromptDriver from tests.mocks.mock_task import MockTask from tests.mocks.mock_tool.tool import MockTool @@ -15,12 +14,11 @@ class TestBaseTask: @pytest.fixture() def task(self): + EventBus.add_event_listeners([EventListener(handler=Mock())]) agent = Agent( - prompt_driver=MockPromptDriver(), - embedding_driver=MockEmbeddingDriver(), tools=[MockTool()], - event_listeners=[EventListener(handler=Mock())], ) + EventBus.add_event_listeners([EventListener(handler=Mock())]) agent.add_task(MockTask("foobar", max_meta_memory_entries=2)) @@ -76,7 +74,81 @@ def test_parents_output(self, task): assert child.parents_output_text == "foobar1\nfoobar3" + def test_parents_property_no_structure(self, task): + workflow = Workflow() + task1 = MockTask("foobar1", id="foobar1") + task2 = MockTask("foobar2", id="foobar2") + task3 = MockTask("foobar3", id="foobar3") + child = MockTask("foobar", id="foobar") + + child.add_parent(task1) + child.add_parent(task2) + child.add_parent(task3) + + with pytest.raises(ValueError, match="Structure must be set to access parents"): + child.parents # noqa: B018 + + workflow.add_tasks(task1, task2, task3, child) + child.structure = workflow + + assert len(child.parents) == 3 + + def test_children_property_no_structure(self, 
task): + workflow = Workflow() + task1 = MockTask("foobar1", id="foobar1") + task2 = MockTask("foobar2", id="foobar2") + task3 = MockTask("foobar3", id="foobar3") + parent = MockTask("foobar", id="foobar") + + parent.add_child(task1) + parent.add_child(task2) + parent.add_child(task3) + + with pytest.raises(ValueError, match="Structure must be set to access children"): + parent.children # noqa: B018 + + workflow.add_tasks(task1, task2, task3, parent) + parent.structure = workflow + + assert len(parent.children) == 3 + def test_execute_publish_events(self, task): task.execute() - assert task.structure.event_listeners[0].handler.call_count == 2 + assert EventBus.event_listeners[0].handler.call_count == 2 + + def test_add_parent(self, task): + parent = MockTask("parent foobar", id="parent_foobar") + + result = task.add_parent(parent) + + assert parent.id in task.parent_ids + assert task.id in parent.child_ids + assert result == task + + def test_add_child(self, task): + child = MockTask("child foobar", id="child_foobar") + + result = task.add_child(child) + + assert child.id in task.child_ids + assert task.id in child.parent_ids + assert result == task + + def test_add_parent_bitshift(self, task): + parent = MockTask("parent foobar", id="parent_foobar") + + added_task = task << parent + + assert parent.id in task.parent_ids + assert task.id in parent.child_ids + assert added_task == parent + + def test_add_child_bitshift(self, task): + child = MockTask("child foobar", id="child_foobar") + + added_task = task >> child + + assert child.id in task.child_ids + assert task.id in child.parent_ids + assert added_task == child diff --git a/tests/unit/tasks/test_base_text_input_task.py b/tests/unit/tasks/test_base_text_input_task.py index 86dc98805..ff6afe42b 100644 --- a/tests/unit/tasks/test_base_text_input_task.py +++ b/tests/unit/tasks/test_base_text_input_task.py @@ -1,7 +1,6 @@ from griptape.artifacts import TextArtifact from griptape.rules import Rule, Ruleset from griptape.structures import Pipeline -from tests.mocks.mock_prompt_driver import MockPromptDriver from tests.mocks.mock_text_input_task import MockTextInputTask @@ -31,7 +30,7 @@ def test_full_context(self): parent = MockTextInputTask("parent") subtask = MockTextInputTask("test", context={"foo": "bar"}) child = MockTextInputTask("child") - pipeline = Pipeline(prompt_driver=MockPromptDriver()) + pipeline = Pipeline() pipeline.add_tasks(parent, subtask, child) diff --git a/tests/unit/tasks/test_code_execution_task.py b/tests/unit/tasks/test_code_execution_task.py index 3178e29db..e2c492fad 100644 --- a/tests/unit/tasks/test_code_execution_task.py +++ b/tests/unit/tasks/test_code_execution_task.py @@ -1,7 +1,6 @@ from griptape.artifacts import BaseArtifact, ErrorArtifact, TextArtifact from griptape.structures import Pipeline from griptape.tasks import CodeExecutionTask -from tests.mocks.mock_prompt_driver import MockPromptDriver def hello_world(task: CodeExecutionTask) -> BaseArtifact: @@ -27,7 +26,7 @@ def test_hello_world_fn(self): # Using a Pipeline # Overriding the input because we are implementing the task not the Pipeline def test_noop_fn(self): - pipeline = Pipeline(prompt_driver=MockPromptDriver()) + pipeline = Pipeline() task = CodeExecutionTask("No Op", run_fn=non_outputting) pipeline.add_task(task) temp = task.run() diff --git a/tests/unit/tasks/test_csv_extraction_task.py b/tests/unit/tasks/test_csv_extraction_task.py deleted file mode 100644 index 7d37c3897..000000000 --- a/tests/unit/tasks/test_csv_extraction_task.py +++ 
/dev/null @@ -1,33 +0,0 @@ -import pytest - -from griptape.engines import CsvExtractionEngine -from griptape.structures import Agent -from griptape.tasks import CsvExtractionTask -from tests.mocks.mock_prompt_driver import MockPromptDriver -from tests.mocks.mock_structure_config import MockStructureConfig - - -class TestCsvExtractionTask: - @pytest.fixture() - def task(self): - return CsvExtractionTask(args={"column_names": ["test1"]}) - - def test_run(self, task): - agent = Agent(config=MockStructureConfig()) - - agent.add_task(task) - - result = task.run() - - assert len(result.value) == 1 - assert result.value[0].value == {"test1": "mock output"} - - def test_config_extraction_engine(self, task): - Agent(config=MockStructureConfig()).add_task(task) - - assert isinstance(task.extraction_engine, CsvExtractionEngine) - assert isinstance(task.extraction_engine.prompt_driver, MockPromptDriver) - - def test_missing_extraction_engine(self, task): - with pytest.raises(ValueError): - task.extraction_engine # noqa: B018 diff --git a/tests/unit/tasks/test_extraction_task.py b/tests/unit/tasks/test_extraction_task.py index afa73a506..2d7ab442c 100644 --- a/tests/unit/tasks/test_extraction_task.py +++ b/tests/unit/tasks/test_extraction_task.py @@ -3,15 +3,12 @@ from griptape.engines import CsvExtractionEngine from griptape.structures import Agent from griptape.tasks import ExtractionTask -from tests.mocks.mock_prompt_driver import MockPromptDriver class TestExtractionTask: @pytest.fixture() def task(self): - return ExtractionTask( - extraction_engine=CsvExtractionEngine(prompt_driver=MockPromptDriver()), args={"column_names": ["test1"]} - ) + return ExtractionTask(extraction_engine=CsvExtractionEngine(column_names=["test1"])) def test_run(self, task): agent = Agent() diff --git a/tests/unit/tasks/test_image_query_task.py b/tests/unit/tasks/test_image_query_task.py index 447faa01c..01c116772 100644 --- a/tests/unit/tasks/test_image_query_task.py +++ b/tests/unit/tasks/test_image_query_task.py @@ -8,7 +8,6 @@ from griptape.structures import Agent from griptape.tasks import BaseTask, ImageQueryTask from tests.mocks.mock_image_query_driver import MockImageQueryDriver -from tests.mocks.mock_structure_config import MockStructureConfig class TestImageQueryTask: @@ -61,17 +60,11 @@ def test_list_input(self, text_artifact: TextArtifact, image_artifact: ImageArti def test_config_image_generation_engine(self, text_artifact, image_artifact): task = ImageQueryTask((text_artifact, [image_artifact, image_artifact])) - Agent(config=MockStructureConfig()).add_task(task) + Agent().add_task(task) assert isinstance(task.image_query_engine, ImageQueryEngine) assert isinstance(task.image_query_engine.image_query_driver, MockImageQueryDriver) - def test_missing_image_generation_engine(self, text_artifact, image_artifact): - task = ImageQueryTask((text_artifact, [image_artifact, image_artifact])) - - with pytest.raises(ValueError, match="Image Query Engine"): - task.image_query_engine # noqa: B018 - def test_run(self, image_query_engine, text_artifact, image_artifact): task = ImageQueryTask((text_artifact, [image_artifact, image_artifact]), image_query_engine=image_query_engine) task.run() diff --git a/tests/unit/tasks/test_inpainting_image_generation_task.py b/tests/unit/tasks/test_inpainting_image_generation_task.py index 61c437bb7..5c4507d49 100644 --- a/tests/unit/tasks/test_inpainting_image_generation_task.py +++ b/tests/unit/tasks/test_inpainting_image_generation_task.py @@ -8,7 +8,6 @@ from griptape.structures 
import Agent from griptape.tasks import BaseTask, InpaintingImageGenerationTask from tests.mocks.mock_image_generation_driver import MockImageGenerationDriver -from tests.mocks.mock_structure_config import MockStructureConfig class TestInpaintingImageGenerationTask: @@ -51,13 +50,7 @@ def test_bad_input(self, image_artifact): def test_config_image_generation_engine(self, text_artifact, image_artifact): task = InpaintingImageGenerationTask((text_artifact, image_artifact, image_artifact)) - Agent(config=MockStructureConfig()).add_task(task) + Agent().add_task(task) assert isinstance(task.image_generation_engine, InpaintingImageGenerationEngine) assert isinstance(task.image_generation_engine.image_generation_driver, MockImageGenerationDriver) - - def test_missing_image_generation_engine(self, text_artifact, image_artifact): - task = InpaintingImageGenerationTask((text_artifact, image_artifact, image_artifact)) - - with pytest.raises(ValueError): - task.image_generation_engine # noqa: B018 diff --git a/tests/unit/tasks/test_json_extraction_task.py b/tests/unit/tasks/test_json_extraction_task.py deleted file mode 100644 index ba7d1ce30..000000000 --- a/tests/unit/tasks/test_json_extraction_task.py +++ /dev/null @@ -1,38 +0,0 @@ -import pytest -from schema import Schema - -from griptape.engines import JsonExtractionEngine -from griptape.structures import Agent -from griptape.tasks import JsonExtractionTask -from tests.mocks.mock_prompt_driver import MockPromptDriver -from tests.mocks.mock_structure_config import MockStructureConfig - - -class TestJsonExtractionTask: - @pytest.fixture() - def task(self): - return JsonExtractionTask("foo", args={"template_schema": Schema({"foo": "bar"}).json_schema("TemplateSchema")}) - - def test_run(self, task): - mock_config = MockStructureConfig() - assert isinstance(mock_config.prompt_driver, MockPromptDriver) - mock_config.prompt_driver.mock_output = '[{"test_key_1": "test_value_1"}, {"test_key_2": "test_value_2"}]' - agent = Agent(config=mock_config) - - agent.add_task(task) - - result = task.run() - - assert len(result.value) == 2 - assert result.value[0].value == '{"test_key_1": "test_value_1"}' - assert result.value[1].value == '{"test_key_2": "test_value_2"}' - - def test_config_extraction_engine(self, task): - Agent(config=MockStructureConfig()).add_task(task) - - assert isinstance(task.extraction_engine, JsonExtractionEngine) - assert isinstance(task.extraction_engine.prompt_driver, MockPromptDriver) - - def test_missing_extraction_engine(self, task): - with pytest.raises(ValueError): - task.extraction_engine # noqa: B018 diff --git a/tests/unit/tasks/test_outpainting_image_generation_task.py b/tests/unit/tasks/test_outpainting_image_generation_task.py index 593451120..ba5e52a82 100644 --- a/tests/unit/tasks/test_outpainting_image_generation_task.py +++ b/tests/unit/tasks/test_outpainting_image_generation_task.py @@ -8,7 +8,6 @@ from griptape.structures import Agent from griptape.tasks import BaseTask, OutpaintingImageGenerationTask from tests.mocks.mock_image_generation_driver import MockImageGenerationDriver -from tests.mocks.mock_structure_config import MockStructureConfig class TestOutpaintingImageGenerationTask: @@ -51,13 +50,7 @@ def test_bad_input(self, image_artifact): def test_config_image_generation_engine(self, text_artifact, image_artifact): task = OutpaintingImageGenerationTask((text_artifact, image_artifact, image_artifact)) - Agent(config=MockStructureConfig()).add_task(task) + Agent().add_task(task) assert 
isinstance(task.image_generation_engine, OutpaintingImageGenerationEngine) assert isinstance(task.image_generation_engine.image_generation_driver, MockImageGenerationDriver) - - def test_missing_image_generation_engine(self, text_artifact, image_artifact): - task = OutpaintingImageGenerationTask((text_artifact, image_artifact, image_artifact)) - - with pytest.raises(ValueError): - task.image_generation_engine # noqa: B018 diff --git a/tests/unit/tasks/test_prompt_image_generation_task.py b/tests/unit/tasks/test_prompt_image_generation_task.py index 1c4b639fb..3ad0302f2 100644 --- a/tests/unit/tasks/test_prompt_image_generation_task.py +++ b/tests/unit/tasks/test_prompt_image_generation_task.py @@ -1,13 +1,10 @@ from unittest.mock import Mock -import pytest - from griptape.artifacts import TextArtifact from griptape.engines import PromptImageGenerationEngine from griptape.structures import Agent from griptape.tasks import BaseTask, PromptImageGenerationTask from tests.mocks.mock_image_generation_driver import MockImageGenerationDriver -from tests.mocks.mock_structure_config import MockStructureConfig class TestPromptImageGenerationTask: @@ -28,13 +25,7 @@ def callable_input(task: BaseTask) -> TextArtifact: def test_config_image_generation_engine_engine(self): task = PromptImageGenerationTask("foo bar") - Agent(config=MockStructureConfig()).add_task(task) + Agent().add_task(task) assert isinstance(task.image_generation_engine, PromptImageGenerationEngine) assert isinstance(task.image_generation_engine.image_generation_driver, MockImageGenerationDriver) - - def test_missing_summary_engine(self): - task = PromptImageGenerationTask("foo bar") - - with pytest.raises(ValueError): - task.image_generation_engine # noqa: B018 diff --git a/tests/unit/tasks/test_prompt_task.py b/tests/unit/tasks/test_prompt_task.py index 083ea6da5..cfe853226 100644 --- a/tests/unit/tasks/test_prompt_task.py +++ b/tests/unit/tasks/test_prompt_task.py @@ -1,5 +1,3 @@ -import pytest - from griptape.artifacts.image_artifact import ImageArtifact from griptape.artifacts.list_artifact import ListArtifact from griptape.artifacts.text_artifact import TextArtifact @@ -9,13 +7,12 @@ from griptape.structures import Pipeline from griptape.tasks import PromptTask from tests.mocks.mock_prompt_driver import MockPromptDriver -from tests.mocks.mock_structure_config import MockStructureConfig class TestPromptTask: def test_run(self): task = PromptTask("test") - pipeline = Pipeline(prompt_driver=MockPromptDriver()) + pipeline = Pipeline() pipeline.add_task(task) @@ -30,16 +27,10 @@ def test_to_text(self): def test_config_prompt_driver(self): task = PromptTask("test") - Pipeline(config=MockStructureConfig()).add_task(task) + Pipeline().add_task(task) assert isinstance(task.prompt_driver, MockPromptDriver) - def test_missing_prompt_driver(self): - task = PromptTask("test") - - with pytest.raises(ValueError): - task.prompt_driver # noqa: B018 - def test_input(self): # Str task = PromptTask("test") diff --git a/tests/unit/tasks/test_rag_task.py b/tests/unit/tasks/test_rag_task.py index b205d385a..dc8603a2a 100644 --- a/tests/unit/tasks/test_rag_task.py +++ b/tests/unit/tasks/test_rag_task.py @@ -15,7 +15,7 @@ def task(self): input="test", rag_engine=RagEngine( response_stage=ResponseRagStage( - response_module=PromptResponseRagModule(prompt_driver=MockPromptDriver()) + response_modules=[PromptResponseRagModule(prompt_driver=MockPromptDriver())] ) ), ) diff --git a/tests/unit/tasks/test_structure_run_task.py 
b/tests/unit/tasks/test_structure_run_task.py index 1053ade9e..6ea9f5985 100644 --- a/tests/unit/tasks/test_structure_run_task.py +++ b/tests/unit/tasks/test_structure_run_task.py @@ -5,9 +5,11 @@ class TestStructureRunTask: - def test_run(self): - agent = Agent(prompt_driver=MockPromptDriver(mock_output="agent mock output")) - pipeline = Pipeline(prompt_driver=MockPromptDriver(mock_output="pipeline mock output")) + def test_run(self, mock_config): + mock_config.drivers_config.prompt_driver = MockPromptDriver(mock_output="agent mock output") + agent = Agent() + mock_config.drivers_config.prompt_driver = MockPromptDriver(mock_output="pipeline mock output") + pipeline = Pipeline() driver = LocalStructureRunDriver(structure_factory_fn=lambda: agent) task = StructureRunTask(driver=driver) diff --git a/tests/unit/tasks/test_text_summary_task.py b/tests/unit/tasks/test_text_summary_task.py index bb08f9d31..f83075f2a 100644 --- a/tests/unit/tasks/test_text_summary_task.py +++ b/tests/unit/tasks/test_text_summary_task.py @@ -1,15 +1,12 @@ -import pytest - from griptape.engines import PromptSummaryEngine from griptape.structures import Agent from griptape.tasks import TextSummaryTask from tests.mocks.mock_prompt_driver import MockPromptDriver -from tests.mocks.mock_structure_config import MockStructureConfig class TestTextSummaryTask: def test_run(self): - task = TextSummaryTask("test", summary_engine=PromptSummaryEngine(prompt_driver=MockPromptDriver())) + task = TextSummaryTask("test", summary_engine=PromptSummaryEngine()) agent = Agent() agent.add_task(task) @@ -26,13 +23,7 @@ def test_context_propagation(self): def test_config_summary_engine(self): task = TextSummaryTask("test") - Agent(config=MockStructureConfig()).add_task(task) + Agent().add_task(task) assert isinstance(task.summary_engine, PromptSummaryEngine) assert isinstance(task.summary_engine.prompt_driver, MockPromptDriver) - - def test_missing_summary_engine(self): - task = TextSummaryTask("test") - - with pytest.raises(ValueError): - task.summary_engine # noqa: B018 diff --git a/tests/unit/tasks/test_text_to_speech_task.py b/tests/unit/tasks/test_text_to_speech_task.py index bf1f19d5a..44348fef0 100644 --- a/tests/unit/tasks/test_text_to_speech_task.py +++ b/tests/unit/tasks/test_text_to_speech_task.py @@ -4,8 +4,6 @@ from griptape.engines import TextToSpeechEngine from griptape.structures import Agent, Pipeline from griptape.tasks import BaseTask, TextToSpeechTask -from tests.mocks.mock_prompt_driver import MockPromptDriver -from tests.mocks.mock_structure_config import MockStructureConfig class TestTextToSpeechTask: @@ -26,7 +24,7 @@ def callable_input(task: BaseTask) -> TextArtifact: def test_config_text_to_speech_engine(self): task = TextToSpeechTask("foo bar") - Agent(config=MockStructureConfig()).add_task(task) + Agent().add_task(task) assert isinstance(task.text_to_speech_engine, TextToSpeechEngine) @@ -41,7 +39,7 @@ def test_run(self): text_to_speech_engine.run.return_value = AudioArtifact(b"audio content", format="mp3") task = TextToSpeechTask("some text", text_to_speech_engine=text_to_speech_engine) - pipeline = Pipeline(prompt_driver=MockPromptDriver()) + pipeline = Pipeline() pipeline.add_task(task) assert isinstance(pipeline.run().output, AudioArtifact) diff --git a/tests/unit/tasks/test_tool_task.py b/tests/unit/tasks/test_tool_task.py index dfc679919..dbb76a943 100644 --- a/tests/unit/tasks/test_tool_task.py +++ b/tests/unit/tasks/test_tool_task.py @@ -5,7 +5,6 @@ from griptape.artifacts import TextArtifact from 
griptape.structures import Agent from griptape.tasks import ActionsSubtask, ToolTask -from tests.mocks.mock_embedding_driver import MockEmbeddingDriver from tests.mocks.mock_prompt_driver import MockPromptDriver from tests.mocks.mock_tool.tool import MockTool from tests.utils import defaults @@ -166,14 +165,15 @@ class TestToolTask: } @pytest.fixture() - def agent(self): + def agent(self, mock_config): output_dict = {"tag": "foo", "name": "MockTool", "path": "test", "input": {"values": {"test": "foobar"}}} - return Agent( - prompt_driver=MockPromptDriver(mock_output=f"```python foo bar\n{json.dumps(output_dict)}"), - embedding_driver=MockEmbeddingDriver(), + mock_config.drivers_config.prompt_driver = MockPromptDriver( + mock_output=f"```python foo bar\n{json.dumps(output_dict)}" ) + return Agent() + def test_run_without_memory(self, agent): task = ToolTask(tool=MockTool()) diff --git a/tests/unit/tasks/test_toolkit_task.py b/tests/unit/tasks/test_toolkit_task.py index cd5dd21f8..6b238c399 100644 --- a/tests/unit/tasks/test_toolkit_task.py +++ b/tests/unit/tasks/test_toolkit_task.py @@ -2,7 +2,6 @@ from griptape.common import ToolAction from griptape.structures import Agent from griptape.tasks import ActionsSubtask, PromptTask, ToolkitTask -from tests.mocks.mock_prompt_driver import MockPromptDriver from tests.mocks.mock_tool.tool import MockTool from tests.utils import defaults @@ -170,11 +169,12 @@ def test_init(self): except ValueError: assert True - def test_run(self): + def test_run(self, mock_config): output = """Answer: done""" + mock_config.drivers_config.prompt_driver.mock_output = output task = ToolkitTask("test", tools=[MockTool(name="Tool1"), MockTool(name="Tool2")]) - agent = Agent(prompt_driver=MockPromptDriver(mock_output=output)) + agent = Agent() agent.add_task(task) @@ -184,11 +184,12 @@ def test_run(self): assert len(task.subtasks) == 1 assert result.output_task.output.to_text() == "done" - def test_run_max_subtasks(self): + def test_run_max_subtasks(self, mock_config): output = 'Actions: [{"tag": "foo", "name": "Tool1", "path": "test", "input": {"values": {"test": "value"}}}]' + mock_config.drivers_config.prompt_driver.mock_output = output task = ToolkitTask("test", tools=[MockTool(name="Tool1")], max_subtasks=3) - agent = Agent(prompt_driver=MockPromptDriver(mock_output=output)) + agent = Agent() agent.add_task(task) @@ -197,11 +198,12 @@ def test_run_max_subtasks(self): assert len(task.subtasks) == 3 assert isinstance(task.output, ErrorArtifact) - def test_run_invalid_react_prompt(self): + def test_run_invalid_react_prompt(self, mock_config): output = """foo bar""" + mock_config.drivers_config.prompt_driver.mock_output = output task = ToolkitTask("test", tools=[MockTool(name="Tool1")], max_subtasks=3) - agent = Agent(prompt_driver=MockPromptDriver(mock_output=output)) + agent = Agent() agent.add_task(task) diff --git a/tests/unit/tasks/test_variation_image_generation_task.py b/tests/unit/tasks/test_variation_image_generation_task.py index a910fb8e0..f6afbf03e 100644 --- a/tests/unit/tasks/test_variation_image_generation_task.py +++ b/tests/unit/tasks/test_variation_image_generation_task.py @@ -8,7 +8,6 @@ from griptape.structures import Agent from griptape.tasks import BaseTask, VariationImageGenerationTask from tests.mocks.mock_image_generation_driver import MockImageGenerationDriver -from tests.mocks.mock_structure_config import MockStructureConfig class TestVariationImageGenerationTask: @@ -48,13 +47,7 @@ def test_bad_input(self, image_artifact): def 
test_config_image_generation_engine(self, text_artifact, image_artifact): task = VariationImageGenerationTask((text_artifact, image_artifact)) - Agent(config=MockStructureConfig()).add_task(task) + Agent().add_task(task) assert isinstance(task.image_generation_engine, VariationImageGenerationEngine) assert isinstance(task.image_generation_engine.image_generation_driver, MockImageGenerationDriver) - - def test_missing_summary_engine(self, text_artifact, image_artifact): - task = VariationImageGenerationTask((text_artifact, image_artifact)) - - with pytest.raises(ValueError): - task.image_generation_engine # noqa: B018 diff --git a/tests/unit/tools/test_aws_iam.py b/tests/unit/tools/test_aws_iam_tool.py similarity index 56% rename from tests/unit/tools/test_aws_iam.py rename to tests/unit/tools/test_aws_iam_tool.py index 54dbaa5fb..fb2b1e381 100644 --- a/tests/unit/tools/test_aws_iam.py +++ b/tests/unit/tools/test_aws_iam_tool.py @@ -1,11 +1,11 @@ import boto3 import pytest -from griptape.tools import AwsIamClient +from griptape.tools import AwsIamTool from tests.utils.aws import mock_aws_credentials -class TestAwsIamClient: +class TestAwsIamTool: @pytest.fixture(autouse=True) def _run_before_and_after_tests(self): mock_aws_credentials() @@ -14,18 +14,18 @@ def test_get_user_policy(self): value = {"user_name": "test_user", "policy_name": "test_policy"} assert ( "error returning policy document" - in AwsIamClient(session=boto3.Session()).get_user_policy({"values": value}).value + in AwsIamTool(session=boto3.Session()).get_user_policy({"values": value}).value ) def test_list_mfa_devices(self): - assert "error listing mfa devices" in AwsIamClient(session=boto3.Session()).list_mfa_devices({}).value + assert "error listing mfa devices" in AwsIamTool(session=boto3.Session()).list_mfa_devices({}).value def test_list_user_policies(self): value = {"user_name": "test_user"} assert ( "error listing iam user policies" - in AwsIamClient(session=boto3.Session()).list_user_policies({"values": value}).value + in AwsIamTool(session=boto3.Session()).list_user_policies({"values": value}).value ) def test_list_users(self): - assert "error listing s3 users" in AwsIamClient(session=boto3.Session()).list_users({}).value + assert "error listing s3 users" in AwsIamTool(session=boto3.Session()).list_users({}).value diff --git a/tests/unit/tools/test_aws_s3.py b/tests/unit/tools/test_aws_s3_tool.py similarity index 58% rename from tests/unit/tools/test_aws_s3.py rename to tests/unit/tools/test_aws_s3_tool.py index 5c6a4c151..9c4c34e0b 100644 --- a/tests/unit/tools/test_aws_s3.py +++ b/tests/unit/tools/test_aws_s3_tool.py @@ -1,42 +1,38 @@ import boto3 import pytest -from griptape.tools import AwsS3Client +from griptape.tools import AwsS3Tool from tests.utils.aws import mock_aws_credentials -class TestAwsS3Client: +class TestAwsS3Tool: @pytest.fixture(autouse=True) def _run_before_and_after_tests(self): mock_aws_credentials() def test_get_bucket_acl(self): value = {"bucket_name": "bucket_test"} - assert ( - "error getting bucket acl" in AwsS3Client(session=boto3.Session()).get_bucket_acl({"values": value}).value - ) + assert "error getting bucket acl" in AwsS3Tool(session=boto3.Session()).get_bucket_acl({"values": value}).value def test_get_bucket_policy(self): value = {"bucket_name": "bucket_test"} assert ( "error getting bucket policy" - in AwsS3Client(session=boto3.Session()).get_bucket_policy({"values": value}).value + in AwsS3Tool(session=boto3.Session()).get_bucket_policy({"values": value}).value ) def 
test_get_object_acl(self): value = {"bucket_name": "bucket_test", "object_key": "key_test"} - assert ( - "error getting object acl" in AwsS3Client(session=boto3.Session()).get_object_acl({"values": value}).value - ) + assert "error getting object acl" in AwsS3Tool(session=boto3.Session()).get_object_acl({"values": value}).value def test_list_s3_buckets(self): - assert "error listing s3 buckets" in AwsS3Client(session=boto3.Session()).list_s3_buckets({}).value + assert "error listing s3 buckets" in AwsS3Tool(session=boto3.Session()).list_s3_buckets({}).value def test_list_objects(self): value = {"bucket_name": "bucket_test"} assert ( "error listing objects in bucket" - in AwsS3Client(session=boto3.Session()).list_objects({"values": value}).value + in AwsS3Tool(session=boto3.Session()).list_objects({"values": value}).value ) def test_upload_memory_artifacts_to_s3(self): @@ -48,7 +44,7 @@ def test_upload_memory_artifacts_to_s3(self): } assert ( "memory not found" - in AwsS3Client(session=boto3.Session()).upload_memory_artifacts_to_s3({"values": value}).value + in AwsS3Tool(session=boto3.Session()).upload_memory_artifacts_to_s3({"values": value}).value ) def test_upload_content_to_s3(self): @@ -56,13 +52,12 @@ def test_upload_content_to_s3(self): assert ( "error uploading objects" - in AwsS3Client(session=boto3.Session()).upload_content_to_s3({"values": value}).value + in AwsS3Tool(session=boto3.Session()).upload_content_to_s3({"values": value}).value ) def test_download_objects(self): value = {"objects": {"bucket_name": "bucket_test", "object_key": "test.txt"}} assert ( - "error downloading objects" - in AwsS3Client(session=boto3.Session()).download_objects({"values": value}).value + "error downloading objects" in AwsS3Tool(session=boto3.Session()).download_objects({"values": value}).value ) diff --git a/tests/unit/tools/test_calculator.py b/tests/unit/tools/test_calculator.py index 72a525210..e598867f9 100644 --- a/tests/unit/tools/test_calculator.py +++ b/tests/unit/tools/test_calculator.py @@ -1,6 +1,6 @@ -from griptape.tools import Calculator +from griptape.tools import CalculatorTool class TestCalculator: def test_calculate(self): - assert Calculator().calculate({"values": {"expression": "5 * 5"}}).value == "25" + assert CalculatorTool().calculate({"values": {"expression": "5 * 5"}}).value == "25" diff --git a/tests/unit/tools/test_computer.py b/tests/unit/tools/test_computer.py index 95de18ae3..1f6e5c7a6 100644 --- a/tests/unit/tools/test_computer.py +++ b/tests/unit/tools/test_computer.py @@ -1,13 +1,13 @@ import pytest -from griptape.tools import Computer +from griptape.tools import ComputerTool from tests.mocks.docker.fake_api_client import make_fake_client class TestComputer: @pytest.fixture() def computer(self): - return Computer(docker_client=make_fake_client(), install_dependencies_on_init=False) + return ComputerTool(docker_client=make_fake_client(), install_dependencies_on_init=False) def test_execute_code(self, computer): assert computer.execute_code({"values": {"code": "print(1)", "filename": "foo.py"}}).value == "hello world" diff --git a/tests/unit/tools/test_date_time.py b/tests/unit/tools/test_date_time.py index c534ae69b..9fa2ce4bb 100644 --- a/tests/unit/tools/test_date_time.py +++ b/tests/unit/tools/test_date_time.py @@ -1,28 +1,28 @@ from datetime import datetime -from griptape.tools import DateTime +from griptape.tools import DateTimeTool class TestDateTime: def test_get_current_datetime(self): - result = DateTime().get_current_datetime({}) + result = 
DateTimeTool().get_current_datetime({}) time_delta = datetime.strptime(result.value, "%Y-%m-%d %H:%M:%S.%f") - datetime.now() assert abs(time_delta.total_seconds()) <= 1000 def test_get_past_relative_datetime(self): - result = DateTime().get_relative_datetime({"values": {"relative_date_string": "5 min ago"}}) + result = DateTimeTool().get_relative_datetime({"values": {"relative_date_string": "5 min ago"}}) time_delta = datetime.strptime(result.value, "%Y-%m-%d %H:%M:%S.%f") - datetime.now() assert abs(time_delta.total_seconds()) <= 1000 - result = DateTime().get_relative_datetime({"values": {"relative_date_string": "2 min ago, 12 seconds"}}) + result = DateTimeTool().get_relative_datetime({"values": {"relative_date_string": "2 min ago, 12 seconds"}}) time_delta = datetime.strptime(result.value, "%Y-%m-%d %H:%M:%S.%f") - datetime.now() assert abs(time_delta.total_seconds()) <= 1000 def test_get_future_relative_datetime(self): - result = DateTime().get_relative_datetime({"values": {"relative_date_string": "in 1 min, 36 seconds"}}) + result = DateTimeTool().get_relative_datetime({"values": {"relative_date_string": "in 1 min, 36 seconds"}}) time_delta = datetime.strptime(result.value, "%Y-%m-%d %H:%M:%S.%f") - datetime.now() assert abs(time_delta.total_seconds()) <= 1000 def test_get_invalid_relative_datetime(self): - result = DateTime().get_relative_datetime({"values": {"relative_date_string": "3 days from now"}}) + result = DateTimeTool().get_relative_datetime({"values": {"relative_date_string": "3 days from now"}}) assert result.type == "ErrorArtifact" diff --git a/tests/unit/tools/test_email_client.py b/tests/unit/tools/test_email_tool.py similarity index 93% rename from tests/unit/tools/test_email_client.py rename to tests/unit/tools/test_email_tool.py index cf99009b8..6c0f7cbd7 100644 --- a/tests/unit/tools/test_email_client.py +++ b/tests/unit/tools/test_email_tool.py @@ -2,14 +2,14 @@ from griptape.artifacts import ErrorArtifact, InfoArtifact, ListArtifact, TextArtifact from griptape.loaders.email_loader import EmailLoader -from griptape.tools import EmailClient +from griptape.tools import EmailTool -class TestEmailClient: +class TestEmailTool: @pytest.fixture(autouse=True) def mock_email_loader(self, mocker): mock_email_loader = mocker.patch( - "griptape.tools.email_client.tool.EmailLoader", + "griptape.tools.email.tool.EmailLoader", EmailQuery=EmailLoader.EmailQuery, # Prevents mocking the nested EmailQuery class ).return_value mock_email_loader.load.return_value = ListArtifact([TextArtifact("fake-email-content")]) @@ -29,7 +29,7 @@ def mock_smtp_ssl(self, mocker): @pytest.fixture() def client(self): - return EmailClient( + return EmailTool( username="fake-username", password="fake-password", smtp_host="foobar.com", @@ -63,7 +63,7 @@ def test_retrieve(self, client, mock_email_loader, values, query): def test_retrieve_when_email_max_retrieve_count_set(self, mock_email_loader): # Given - client = EmailClient(email_max_retrieve_count=84, mailboxes={"INBOX": "default mailbox for incoming email"}) + client = EmailTool(email_max_retrieve_count=84, mailboxes={"INBOX": "default mailbox for incoming email"}) # When client.retrieve({"values": {"label": "fake-label"}}) @@ -91,7 +91,7 @@ def test_send(self, client, send_params): def test_send_when_smtp_overrides_set(self, send_params): # Given - client = EmailClient( + client = EmailTool( smtp_host="smtp-host", smtp_port=86, smtp_use_ssl=False, diff --git a/tests/unit/tools/test_extraction_tool.py b/tests/unit/tools/test_extraction_tool.py new 
file mode 100644 index 000000000..1219da373 --- /dev/null +++ b/tests/unit/tools/test_extraction_tool.py @@ -0,0 +1,67 @@ +import json + +import pytest + +from griptape.artifacts import TextArtifact +from griptape.engines import CsvExtractionEngine, JsonExtractionEngine +from griptape.tools import ExtractionTool +from tests.mocks.mock_prompt_driver import MockPromptDriver +from tests.utils import defaults + + +class TestExtractionTool: + @pytest.fixture() + def json_tool(self): + return ExtractionTool( + input_memory=[defaults.text_task_memory("TestMemory")], + extraction_engine=JsonExtractionEngine( + prompt_driver=MockPromptDriver( + mock_output='[{"test_key_1": "test_value_1"}, {"test_key_2": "test_value_2"}]' + ), + template_schema={}, + ), + ) + + @pytest.fixture() + def csv_tool(self): + return ExtractionTool( + input_memory=[defaults.text_task_memory("TestMemory")], + extraction_engine=CsvExtractionEngine( + prompt_driver=MockPromptDriver(), + column_names=["test1"], + ), + ) + + def test_json_extract_artifacts(self, json_tool): + json_tool.input_memory[0].store_artifact("foo", TextArtifact(json.dumps({}))) + + result = json_tool.extract( + {"values": {"data": {"memory_name": json_tool.input_memory[0].name, "artifact_namespace": "foo"}}} + ) + + assert len(result.value) == 2 + assert result.value[0].value == '{"test_key_1": "test_value_1"}' + assert result.value[1].value == '{"test_key_2": "test_value_2"}' + + def test_json_extract_content(self, json_tool): + result = json_tool.extract({"values": {"data": "foo"}}) + + assert len(result.value) == 2 + assert result.value[0].value == '{"test_key_1": "test_value_1"}' + assert result.value[1].value == '{"test_key_2": "test_value_2"}' + + def test_csv_extract_artifacts(self, csv_tool): + csv_tool.input_memory[0].store_artifact("foo", TextArtifact("foo,bar\nbaz,maz")) + + result = csv_tool.extract( + {"values": {"data": {"memory_name": csv_tool.input_memory[0].name, "artifact_namespace": "foo"}}} + ) + + assert len(result.value) == 1 + assert result.value[0].value == {"test1": "mock output"} + + def test_csv_extract_content(self, csv_tool): + result = csv_tool.extract({"values": {"data": "foo"}}) + + assert len(result.value) == 1 + assert result.value[0].value == {"test1": "mock output"} diff --git a/tests/unit/tools/test_file_manager.py b/tests/unit/tools/test_file_manager.py index 57dd2c83e..dccf2f1a2 100644 --- a/tests/unit/tools/test_file_manager.py +++ b/tests/unit/tools/test_file_manager.py @@ -9,14 +9,14 @@ from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers.file_manager.local_file_manager_driver import LocalFileManagerDriver from griptape.loaders.text_loader import TextLoader -from griptape.tools import FileManager +from griptape.tools import FileManagerTool from tests.utils import defaults class TestFileManager: @pytest.fixture() def file_manager(self): - return FileManager( + return FileManagerTool( input_memory=[defaults.text_task_memory("Memory1")], file_manager_driver=LocalFileManagerDriver(workdir=os.path.abspath(os.path.dirname(__file__))), ) @@ -47,7 +47,7 @@ def test_load_files_from_disk_with_encoding(self, file_manager): assert isinstance(result.value[0], TextArtifact) def test_load_files_from_disk_with_encoding_failure(self): - file_manager = FileManager( + file_manager = FileManagerTool( file_manager_driver=LocalFileManagerDriver( default_loader=TextLoader(encoding="utf-8"), loaders={}, @@ -65,7 +65,9 @@ def test_save_memory_artifacts_to_disk_for_one_artifact(self, temp_dir): 
memory.store_artifact("foobar", artifact) - file_manager = FileManager(input_memory=[memory], file_manager_driver=LocalFileManagerDriver(workdir=temp_dir)) + file_manager = FileManagerTool( + input_memory=[memory], file_manager_driver=LocalFileManagerDriver(workdir=temp_dir) + ) result = file_manager.save_memory_artifacts_to_disk( { "values": { @@ -88,7 +90,9 @@ def test_save_memory_artifacts_to_disk_for_multiple_artifacts(self, temp_dir): for a in artifacts: memory.store_artifact("foobar", a) - file_manager = FileManager(input_memory=[memory], file_manager_driver=LocalFileManagerDriver(workdir=temp_dir)) + file_manager = FileManagerTool( + input_memory=[memory], file_manager_driver=LocalFileManagerDriver(workdir=temp_dir) + ) result = file_manager.save_memory_artifacts_to_disk( { "values": { @@ -105,7 +109,7 @@ def test_save_memory_artifacts_to_disk_for_multiple_artifacts(self, temp_dir): assert result.value == "Successfully saved memory artifacts to disk" def test_save_content_to_file(self, temp_dir): - file_manager = FileManager(file_manager_driver=LocalFileManagerDriver(workdir=temp_dir)) + file_manager = FileManagerTool(file_manager_driver=LocalFileManagerDriver(workdir=temp_dir)) result = file_manager.save_content_to_file( {"values": {"path": os.path.join("test", "foobar.txt"), "content": "foobar"}} ) @@ -114,7 +118,7 @@ def test_save_content_to_file(self, temp_dir): assert result.value == "Successfully saved file" def test_save_content_to_file_with_encoding(self, temp_dir): - file_manager = FileManager( + file_manager = FileManagerTool( file_manager_driver=LocalFileManagerDriver(default_loader=TextLoader(encoding="utf-8"), workdir=temp_dir) ) result = file_manager.save_content_to_file( @@ -125,7 +129,7 @@ def test_save_content_to_file_with_encoding(self, temp_dir): assert result.value == "Successfully saved file" def test_save_and_load_content_to_file_with_encoding(self, temp_dir): - file_manager = FileManager( + file_manager = FileManagerTool( file_manager_driver=LocalFileManagerDriver(loaders={"txt": TextLoader(encoding="ascii")}, workdir=temp_dir) ) result = file_manager.save_content_to_file( @@ -135,7 +139,7 @@ def test_save_and_load_content_to_file_with_encoding(self, temp_dir): assert Path(os.path.join(temp_dir, "test", "foobar.txt")).read_text() == "foobar" assert result.value == "Successfully saved file" - file_manager = FileManager( + file_manager = FileManagerTool( file_manager_driver=LocalFileManagerDriver( default_loader=TextLoader(encoding="ascii"), loaders={}, workdir=temp_dir ) diff --git a/tests/unit/tools/test_google_docs_client.py b/tests/unit/tools/test_google_docs_tool.py similarity index 84% rename from tests/unit/tools/test_google_docs_client.py rename to tests/unit/tools/test_google_docs_tool.py index a42fddda3..516961c61 100644 --- a/tests/unit/tools/test_google_docs_client.py +++ b/tests/unit/tools/test_google_docs_tool.py @@ -1,12 +1,12 @@ import pytest -class TestGoogleDocsClient: +class TestGoogleDocsTool: @pytest.fixture() def mock_docs_client(self): - from griptape.tools import GoogleDocsClient + from griptape.tools import GoogleDocsTool - return GoogleDocsClient(owner_email="tony@griptape.ai", service_account_credentials={}) + return GoogleDocsTool(owner_email="tony@griptape.ai", service_account_credentials={}) def test_append_text(self, mock_docs_client): params = {"file_path": "test_folder/test_document", "text": "Appending this text"} diff --git a/tests/unit/tools/test_google_drive_client.py b/tests/unit/tools/test_google_drive_tool.py similarity 
index 68% rename from tests/unit/tools/test_google_drive_client.py rename to tests/unit/tools/test_google_drive_tool.py index 55f3c168f..55eae2267 100644 --- a/tests/unit/tools/test_google_drive_client.py +++ b/tests/unit/tools/test_google_drive_tool.py @@ -1,11 +1,11 @@ from griptape.artifacts import ErrorArtifact -from griptape.tools import GoogleDriveClient +from griptape.tools import GoogleDriveTool -class TestGoogleDriveClient: +class TestGoogleDriveTool: def test_list_files(self): value = {"folder_path": "root"} # This can be any folder path you want to test - result = GoogleDriveClient(owner_email="tony@griptape.ai", service_account_credentials={}).list_files( + result = GoogleDriveTool(owner_email="tony@griptape.ai", service_account_credentials={}).list_files( {"values": value} ) @@ -14,16 +14,16 @@ def test_list_files(self): def test_save_content_to_drive(self): value = {"path": "/path/to/your/file.txt", "content": "Sample content for the file."} - result = GoogleDriveClient( - owner_email="tony@griptape.ai", service_account_credentials={} - ).save_content_to_drive({"values": value}) + result = GoogleDriveTool(owner_email="tony@griptape.ai", service_account_credentials={}).save_content_to_drive( + {"values": value} + ) assert isinstance(result, ErrorArtifact) assert "error saving file to Google Drive" in result.value def test_download_files(self): value = {"file_paths": ["example_folder/example_file.txt"]} - result = GoogleDriveClient(owner_email="tony@griptape.ai", service_account_credentials={}).download_files( + result = GoogleDriveTool(owner_email="tony@griptape.ai", service_account_credentials={}).download_files( {"values": value} ) @@ -33,7 +33,7 @@ def test_download_files(self): def test_search_files(self): value = {"search_mode": "name", "file_name": "search_file_name.txt"} - result = GoogleDriveClient(owner_email="tony@griptape.ai", service_account_credentials={}).search_files( + result = GoogleDriveTool(owner_email="tony@griptape.ai", service_account_credentials={}).search_files( {"values": value} ) @@ -43,7 +43,7 @@ def test_search_files(self): def test_share_file(self): value = {"file_path": "/path/to/your/file.txt", "email_address": "sample_email@example.com", "role": "reader"} - result = GoogleDriveClient(owner_email="tony@griptape.ai", service_account_credentials={}).share_file( + result = GoogleDriveTool(owner_email="tony@griptape.ai", service_account_credentials={}).share_file( {"values": value} ) diff --git a/tests/unit/tools/test_google_gmail_client.py b/tests/unit/tools/test_google_gmail_tool.py similarity index 61% rename from tests/unit/tools/test_google_gmail_client.py rename to tests/unit/tools/test_google_gmail_tool.py index 7dcf1de38..ace7ef0ba 100644 --- a/tests/unit/tools/test_google_gmail_client.py +++ b/tests/unit/tools/test_google_gmail_tool.py @@ -1,12 +1,12 @@ -from griptape.tools import GoogleGmailClient +from griptape.tools import GoogleGmailTool -class TestGoogleGmailClient: +class TestGoogleGmailTool: def test_create_draft_email(self): value = {"subject": "stacey's mom", "from": "test@test.com", "body": "got it going on"} assert ( "error creating draft email" - in GoogleGmailClient(service_account_credentials={}, owner_email="tony@griptape.ai") + in GoogleGmailTool(service_account_credentials={}, owner_email="tony@griptape.ai") .create_draft_email({"values": value}) .value ) diff --git a/tests/unit/tools/test_griptape_cloud_knowledge_base_client.py b/tests/unit/tools/test_griptape_cloud_knowledge_base_tool.py similarity index 83% rename from 
tests/unit/tools/test_griptape_cloud_knowledge_base_client.py rename to tests/unit/tools/test_griptape_cloud_knowledge_base_tool.py index 7d75d8670..b98713273 100644 --- a/tests/unit/tools/test_griptape_cloud_knowledge_base_client.py +++ b/tests/unit/tools/test_griptape_cloud_knowledge_base_tool.py @@ -4,10 +4,10 @@ from griptape.artifacts import ErrorArtifact, TextArtifact -class TestGriptapeCloudKnowledgeBaseClient: +class TestGriptapeCloudKnowledgeBaseTool: @pytest.fixture() def client(self, mocker): - from griptape.tools import GriptapeCloudKnowledgeBaseClient + from griptape.tools import GriptapeCloudKnowledgeBaseTool mock_response = mocker.Mock() mock_response.status_code = 201 @@ -19,45 +19,45 @@ def client(self, mocker): mock_response.json.return_value = {"description": "fizz buzz"} mocker.patch("requests.get", return_value=mock_response) - return GriptapeCloudKnowledgeBaseClient( + return GriptapeCloudKnowledgeBaseTool( base_url="https://api.griptape.ai", api_key="foo bar", knowledge_base_id="1" ) @pytest.fixture() def client_no_description(self, mocker): - from griptape.tools import GriptapeCloudKnowledgeBaseClient + from griptape.tools import GriptapeCloudKnowledgeBaseTool mock_response = mocker.Mock() mock_response.json.return_value = {} mock_response.status_code = 200 mocker.patch("requests.get", return_value=mock_response) - return GriptapeCloudKnowledgeBaseClient( + return GriptapeCloudKnowledgeBaseTool( base_url="https://api.griptape.ai", api_key="foo bar", knowledge_base_id="1" ) @pytest.fixture() def client_kb_not_found(self, mocker): - from griptape.tools import GriptapeCloudKnowledgeBaseClient + from griptape.tools import GriptapeCloudKnowledgeBaseTool mock_response = mocker.Mock() mock_response.json.return_value = {} mock_response.status_code = 404 mocker.patch("requests.get", return_value=mock_response) - return GriptapeCloudKnowledgeBaseClient( + return GriptapeCloudKnowledgeBaseTool( base_url="https://api.griptape.ai", api_key="foo bar", knowledge_base_id="1" ) @pytest.fixture() def client_kb_error(self, mocker): - from griptape.tools import GriptapeCloudKnowledgeBaseClient + from griptape.tools import GriptapeCloudKnowledgeBaseTool mock_response = mocker.Mock() mock_response.status_code = 500 mocker.patch("requests.post", return_value=mock_response, side_effect=exceptions.RequestException("error")) - return GriptapeCloudKnowledgeBaseClient( + return GriptapeCloudKnowledgeBaseTool( base_url="https://api.griptape.ai", api_key="foo bar", knowledge_base_id="1" ) @@ -75,7 +75,7 @@ def test_get_knowledge_base_description(self, client): assert client._get_knowledge_base_description() == "foo bar" def test_get_knowledge_base_description_error(self, client_no_description): - exception_match_text = f"No description found for Knowledge Base {client_no_description.knowledge_base_id}. Please set a description, or manually set the `GriptapeCloudKnowledgeBaseClient.description` attribute." + exception_match_text = f"No description found for Knowledge Base {client_no_description.knowledge_base_id}. Please set a description, or manually set the `GriptapeCloudKnowledgeBaseTool.description` attribute." 
with pytest.raises(ValueError, match=exception_match_text): client_no_description._get_knowledge_base_description() diff --git a/tests/unit/tools/test_inpainting_image_generation_client.py b/tests/unit/tools/test_inpainting_image_generation_tool.py similarity index 87% rename from tests/unit/tools/test_inpainting_image_generation_client.py rename to tests/unit/tools/test_inpainting_image_generation_tool.py index 0c5e49f9a..45afcbc63 100644 --- a/tests/unit/tools/test_inpainting_image_generation_client.py +++ b/tests/unit/tools/test_inpainting_image_generation_tool.py @@ -6,10 +6,10 @@ import pytest from griptape.artifacts import ImageArtifact -from griptape.tools import InpaintingImageGenerationClient +from griptape.tools import InpaintingImageGenerationTool -class TestInpaintingImageGenerationClient: +class TestInpaintingImageGenerationTool: @pytest.fixture() def image_artifact(self) -> ImageArtifact: return ImageArtifact(value=b"image_data", format="png", width=512, height=512, name="name") @@ -26,12 +26,12 @@ def image_loader(self) -> Mock: return loader @pytest.fixture() - def image_generator(self, image_generation_engine, image_loader) -> InpaintingImageGenerationClient: - return InpaintingImageGenerationClient(engine=image_generation_engine, image_loader=image_loader) + def image_generator(self, image_generation_engine, image_loader) -> InpaintingImageGenerationTool: + return InpaintingImageGenerationTool(engine=image_generation_engine, image_loader=image_loader) def test_validate_output_configs(self, image_generation_engine) -> None: with pytest.raises(ValueError): - InpaintingImageGenerationClient(engine=image_generation_engine, output_dir="test", output_file="test") + InpaintingImageGenerationTool(engine=image_generation_engine, output_dir="test", output_file="test") def test_image_inpainting(self, image_generator, path_from_resource_path) -> None: image_generator.engine.run.return_value = Mock( @@ -55,7 +55,7 @@ def test_image_inpainting_with_outfile( self, image_generation_engine, image_loader, path_from_resource_path ) -> None: outfile = f"{tempfile.gettempdir()}/{str(uuid.uuid4())}.png" - image_generator = InpaintingImageGenerationClient( + image_generator = InpaintingImageGenerationTool( engine=image_generation_engine, output_file=outfile, image_loader=image_loader ) @@ -78,7 +78,7 @@ def test_image_inpainting_with_outfile( assert os.path.exists(outfile) def test_image_inpainting_from_memory(self, image_generation_engine, image_artifact): - image_generator = InpaintingImageGenerationClient(engine=image_generation_engine) + image_generator = InpaintingImageGenerationTool(engine=image_generation_engine) memory = Mock() memory.load_artifacts = Mock(return_value=[image_artifact]) image_generator.find_input_memory = Mock(return_value=memory) diff --git a/tests/unit/tools/test_openweather_client.py b/tests/unit/tools/test_openweather_tool.py similarity index 92% rename from tests/unit/tools/test_openweather_client.py rename to tests/unit/tools/test_openweather_tool.py index 89b80e164..44acaf571 100644 --- a/tests/unit/tools/test_openweather_client.py +++ b/tests/unit/tools/test_openweather_tool.py @@ -3,12 +3,12 @@ import pytest from griptape.artifacts import ErrorArtifact -from griptape.tools import OpenWeatherClient +from griptape.tools import OpenWeatherTool @pytest.fixture() def client(): - return OpenWeatherClient(api_key="YOUR_API_KEY") + return OpenWeatherTool(api_key="YOUR_API_KEY") class MockResponse: @@ -21,9 +21,9 @@ def json(self): def mock_requests_get(*args, 
**kwargs): - if args[0] == OpenWeatherClient.GEOCODING_URL: + if args[0] == OpenWeatherTool.GEOCODING_URL: return MockResponse([{"lat": 40.7128, "lon": -74.0061}], 200) - elif args[0] == OpenWeatherClient.BASE_URL: + elif args[0] == OpenWeatherTool.BASE_URL: return MockResponse({"weather": "sunny"}, 200) return MockResponse(None, 404) diff --git a/tests/unit/tools/test_outpainting_image_variation_client.py b/tests/unit/tools/test_outpainting_image_variation_tool.py similarity index 87% rename from tests/unit/tools/test_outpainting_image_variation_client.py rename to tests/unit/tools/test_outpainting_image_variation_tool.py index 13d8df082..4fbcbe8d4 100644 --- a/tests/unit/tools/test_outpainting_image_variation_client.py +++ b/tests/unit/tools/test_outpainting_image_variation_tool.py @@ -6,10 +6,10 @@ import pytest from griptape.artifacts import ImageArtifact -from griptape.tools import OutpaintingImageGenerationClient +from griptape.tools import OutpaintingImageGenerationTool -class TestOutpaintingImageGenerationClient: +class TestOutpaintingImageGenerationTool: @pytest.fixture() def image_artifact(self) -> ImageArtifact: return ImageArtifact(value=b"image_data", format="png", width=512, height=512, name="name") @@ -26,12 +26,12 @@ def image_loader(self, image_artifact) -> Mock: return loader @pytest.fixture() - def image_generator(self, image_generation_engine, image_loader) -> OutpaintingImageGenerationClient: - return OutpaintingImageGenerationClient(engine=image_generation_engine, image_loader=image_loader) + def image_generator(self, image_generation_engine, image_loader) -> OutpaintingImageGenerationTool: + return OutpaintingImageGenerationTool(engine=image_generation_engine, image_loader=image_loader) def test_validate_output_configs(self, image_generation_engine) -> None: with pytest.raises(ValueError): - OutpaintingImageGenerationClient(engine=image_generation_engine, output_dir="test", output_file="test") + OutpaintingImageGenerationTool(engine=image_generation_engine, output_dir="test", output_file="test") def test_image_outpainting(self, image_generator, path_from_resource_path) -> None: image_generator.engine.run.return_value = Mock( @@ -55,7 +55,7 @@ def test_image_outpainting_with_outfile( self, image_generation_engine, image_loader, path_from_resource_path ) -> None: outfile = f"{tempfile.gettempdir()}/{str(uuid.uuid4())}.png" - image_generator = OutpaintingImageGenerationClient( + image_generator = OutpaintingImageGenerationTool( engine=image_generation_engine, output_file=outfile, image_loader=image_loader ) @@ -78,7 +78,7 @@ def test_image_outpainting_with_outfile( assert os.path.exists(outfile) def test_image_outpainting_from_memory(self, image_generation_engine, image_artifact): - image_generator = OutpaintingImageGenerationClient(engine=image_generation_engine) + image_generator = OutpaintingImageGenerationTool(engine=image_generation_engine) memory = Mock() memory.load_artifacts = Mock(return_value=[image_artifact]) image_generator.find_input_memory = Mock(return_value=memory) diff --git a/tests/unit/tools/test_prompt_image_generation_client.py b/tests/unit/tools/test_prompt_image_generation_tool.py similarity index 77% rename from tests/unit/tools/test_prompt_image_generation_client.py rename to tests/unit/tools/test_prompt_image_generation_tool.py index 276e33473..a0c5c7037 100644 --- a/tests/unit/tools/test_prompt_image_generation_client.py +++ b/tests/unit/tools/test_prompt_image_generation_tool.py @@ -5,21 +5,21 @@ import pytest -from griptape.tools import 
PromptImageGenerationClient +from griptape.tools import PromptImageGenerationTool -class TestPromptImageGenerationClient: +class TestPromptImageGenerationTool: @pytest.fixture() def image_generation_engine(self) -> Mock: return Mock() @pytest.fixture() - def image_generator(self, image_generation_engine) -> PromptImageGenerationClient: - return PromptImageGenerationClient(engine=image_generation_engine) + def image_generator(self, image_generation_engine) -> PromptImageGenerationTool: + return PromptImageGenerationTool(engine=image_generation_engine) def test_validate_output_configs(self, image_generation_engine) -> None: with pytest.raises(ValueError): - PromptImageGenerationClient(engine=image_generation_engine, output_dir="test", output_file="test") + PromptImageGenerationTool(engine=image_generation_engine, output_dir="test", output_file="test") def test_generate_image(self, image_generator) -> None: image_generator.engine.run.return_value = Mock( @@ -34,7 +34,7 @@ def test_generate_image(self, image_generator) -> None: def test_generate_image_with_outfile(self, image_generation_engine) -> None: outfile = f"{tempfile.gettempdir()}/{str(uuid.uuid4())}.png" - image_generator = PromptImageGenerationClient(engine=image_generation_engine, output_file=outfile) + image_generator = PromptImageGenerationTool(engine=image_generation_engine, output_file=outfile) image_generator.engine.run.return_value = Mock( # pyright: ignore[reportFunctionMemberAccess] value=b"image data", format="png", width=512, height=512, model="test model", prompt="test prompt" diff --git a/tests/unit/tools/test_prompt_summary_tool.py b/tests/unit/tools/test_prompt_summary_tool.py new file mode 100644 index 000000000..81a03acf5 --- /dev/null +++ b/tests/unit/tools/test_prompt_summary_tool.py @@ -0,0 +1,29 @@ +import pytest + +from griptape.artifacts import TextArtifact +from griptape.engines import PromptSummaryEngine +from griptape.tools import PromptSummaryTool +from tests.mocks.mock_prompt_driver import MockPromptDriver +from tests.utils import defaults + + +class TestPromptSummaryTool: + @pytest.fixture() + def tool(self): + return PromptSummaryTool( + input_memory=[defaults.text_task_memory("TestMemory")], + prompt_summary_engine=PromptSummaryEngine(prompt_driver=MockPromptDriver()), + ) + + def test_summarize_artifacts(self, tool): + tool.input_memory[0].store_artifact("foo", TextArtifact("test")) + + assert ( + tool.summarize( + {"values": {"summary": {"memory_name": tool.input_memory[0].name, "artifact_namespace": "foo"}}} + ).value + == "mock output" + ) + + def test_summarize_content(self, tool): + assert tool.summarize({"values": {"summary": "test"}}).value == "mock output" diff --git a/tests/unit/tools/test_query_tool.py b/tests/unit/tools/test_query_tool.py new file mode 100644 index 000000000..dcbee16cf --- /dev/null +++ b/tests/unit/tools/test_query_tool.py @@ -0,0 +1,31 @@ +import pytest + +from griptape.tools.query.tool import QueryTool +from tests.utils import defaults + + +class TestQueryTool: + @pytest.fixture() + def tool(self): + return QueryTool(input_memory=[defaults.text_task_memory("TestMemory")]) + + def test_query_str(self, tool): + assert tool.query({"values": {"query": "test", "content": "foo"}}).value[0].value == "mock output" + + def test_query_artifacts(self, tool): + assert ( + tool.query( + { + "values": { + "query": "test", + "content": { + "memory_name": tool.input_memory[0].name, + "artifact_namespace": "test", + }, + } + } + ) + .value[0] + .value + == "mock output" + ) diff --git 
a/tests/unit/tools/test_rag_client.py b/tests/unit/tools/test_rag_tool.py similarity index 57% rename from tests/unit/tools/test_rag_client.py rename to tests/unit/tools/test_rag_tool.py index 9c6497b02..eb1c00e4c 100644 --- a/tests/unit/tools/test_rag_client.py +++ b/tests/unit/tools/test_rag_tool.py @@ -1,13 +1,13 @@ from griptape.drivers import LocalVectorStoreDriver -from griptape.tools import RagClient +from griptape.tools import RagTool from tests.mocks.mock_embedding_driver import MockEmbeddingDriver from tests.mocks.mock_prompt_driver import MockPromptDriver from tests.utils.defaults import rag_engine -class TestRagClient: +class TestRagTool: def test_search(self): vector_store_driver = LocalVectorStoreDriver(embedding_driver=MockEmbeddingDriver()) - tool = RagClient(description="Test", rag_engine=rag_engine(MockPromptDriver(), vector_store_driver)) + tool = RagTool(description="Test", rag_engine=rag_engine(MockPromptDriver(), vector_store_driver)) - assert tool.search({"values": {"query": "test"}}).value == "mock output" + assert tool.search({"values": {"query": "test"}}).value[0].value == "mock output" diff --git a/tests/unit/tools/test_rest_api_client.py b/tests/unit/tools/test_rest_api_tool.py similarity index 92% rename from tests/unit/tools/test_rest_api_client.py rename to tests/unit/tools/test_rest_api_tool.py index 58f21d1f1..70d63478e 100644 --- a/tests/unit/tools/test_rest_api_client.py +++ b/tests/unit/tools/test_rest_api_tool.py @@ -6,9 +6,9 @@ class TestRestApi: @pytest.fixture() def client(self): - from griptape.tools import RestApiClient + from griptape.tools import RestApiTool - return RestApiClient(base_url="http://www.griptape.ai", description="Griptape website.") + return RestApiTool(base_url="http://www.griptape.ai", description="Griptape website.") def test_put(self, client): assert isinstance(client.post({"values": {"body": {}}}), BaseArtifact) diff --git a/tests/unit/tools/test_sql_client.py b/tests/unit/tools/test_sql_tool.py similarity index 87% rename from tests/unit/tools/test_sql_client.py rename to tests/unit/tools/test_sql_tool.py index 8ab61fc8f..2ef50ff54 100644 --- a/tests/unit/tools/test_sql_client.py +++ b/tests/unit/tools/test_sql_tool.py @@ -4,10 +4,10 @@ from griptape.drivers import SqlDriver from griptape.loaders import SqlLoader -from griptape.tools import SqlClient +from griptape.tools import SqlTool -class TestSqlClient: +class TestSqlTool: @pytest.fixture() def driver(self): new_driver = SqlDriver(engine_url="sqlite:///:memory:") @@ -22,14 +22,14 @@ def driver(self): def test_execute_query(self, driver): with sqlite3.connect(":memory:"): - client = SqlClient(sql_loader=SqlLoader(sql_driver=driver), table_name="test_table", engine_name="sqlite") + client = SqlTool(sql_loader=SqlLoader(sql_driver=driver), table_name="test_table", engine_name="sqlite") result = client.execute_query({"values": {"sql_query": "SELECT * from test_table;"}}) assert len(result.value) == 1 assert result.value[0].value == {"id": 1, "name": "Alice", "age": 25, "city": "New York"} def test_execute_query_description(self, driver): - client = SqlClient( + client = SqlTool( sql_loader=SqlLoader(sql_driver=driver), table_name="test_table", table_description="foobar", diff --git a/tests/unit/tools/test_structure_run_client.py b/tests/unit/tools/test_structure_run_tool.py similarity index 66% rename from tests/unit/tools/test_structure_run_client.py rename to tests/unit/tools/test_structure_run_tool.py index d498b7c56..f62cdeea7 100644 --- 
a/tests/unit/tools/test_structure_run_client.py +++ b/tests/unit/tools/test_structure_run_tool.py @@ -2,17 +2,15 @@ from griptape.drivers.structure_run.local_structure_run_driver import LocalStructureRunDriver from griptape.structures import Agent -from griptape.tools import StructureRunClient -from tests.mocks.mock_prompt_driver import MockPromptDriver +from griptape.tools import StructureRunTool -class TestStructureRunClient: +class TestStructureRunTool: @pytest.fixture() def client(self): - driver = MockPromptDriver() - agent = Agent(prompt_driver=driver) + agent = Agent() - return StructureRunClient( + return StructureRunTool( description="foo bar", driver=LocalStructureRunDriver(structure_factory_fn=lambda: agent) ) diff --git a/tests/unit/tools/test_task_memory_client.py b/tests/unit/tools/test_task_memory_client.py deleted file mode 100644 index 4276b89ec..000000000 --- a/tests/unit/tools/test_task_memory_client.py +++ /dev/null @@ -1,29 +0,0 @@ -import pytest - -from griptape.artifacts import TextArtifact -from griptape.tools import TaskMemoryClient -from tests.utils import defaults - - -class TestTaskMemoryClient: - @pytest.fixture() - def tool(self): - return TaskMemoryClient(off_prompt=True, input_memory=[defaults.text_task_memory("TestMemory")]) - - def test_summarize(self, tool): - tool.input_memory[0].store_artifact("foo", TextArtifact("test")) - - assert ( - tool.summarize({"values": {"memory_name": tool.input_memory[0].name, "artifact_namespace": "foo"}}).value - == "mock output" - ) - - def test_query(self, tool): - tool.input_memory[0].store_artifact("foo", TextArtifact("test")) - - assert ( - tool.query( - {"values": {"query": "foobar", "memory_name": tool.input_memory[0].name, "artifact_namespace": "foo"}} - ).value - == "mock output" - ) diff --git a/tests/unit/tools/test_text_to_speech_client.py b/tests/unit/tools/test_text_to_speech_tool.py similarity index 74% rename from tests/unit/tools/test_text_to_speech_client.py rename to tests/unit/tools/test_text_to_speech_tool.py index 0b9061aa6..8821d48fc 100644 --- a/tests/unit/tools/test_text_to_speech_client.py +++ b/tests/unit/tools/test_text_to_speech_tool.py @@ -5,21 +5,21 @@ import pytest -from griptape.tools.text_to_speech_client.tool import TextToSpeechClient +from griptape.tools.text_to_speech.tool import TextToSpeechTool -class TestTextToSpeechClient: +class TestTextToSpeechTool: @pytest.fixture() def text_to_speech_engine(self) -> Mock: return Mock() @pytest.fixture() - def text_to_speech_client(self, text_to_speech_engine) -> TextToSpeechClient: - return TextToSpeechClient(engine=text_to_speech_engine) + def text_to_speech_client(self, text_to_speech_engine) -> TextToSpeechTool: + return TextToSpeechTool(engine=text_to_speech_engine) def test_validate_output_configs(self, text_to_speech_engine) -> None: with pytest.raises(ValueError): - TextToSpeechClient(engine=text_to_speech_engine, output_dir="test", output_file="test") + TextToSpeechTool(engine=text_to_speech_engine, output_dir="test", output_file="test") def test_text_to_speech(self, text_to_speech_client) -> None: text_to_speech_client.engine.run.return_value = Mock(value=b"audio data", format="mp3") @@ -30,7 +30,7 @@ def test_text_to_speech(self, text_to_speech_client) -> None: def test_text_to_speech_with_outfile(self, text_to_speech_engine) -> None: outfile = f"{tempfile.gettempdir()}/{str(uuid.uuid4())}.mp3" - text_to_speech_client = TextToSpeechClient(engine=text_to_speech_engine, output_file=outfile) + text_to_speech_client = 
TextToSpeechTool(engine=text_to_speech_engine, output_file=outfile) text_to_speech_client.engine.run.return_value = Mock(value=b"audio data", format="mp3") # pyright: ignore[reportFunctionMemberAccess] diff --git a/tests/unit/tools/test_transcription_client.py b/tests/unit/tools/test_transcription_tool.py similarity index 83% rename from tests/unit/tools/test_transcription_client.py rename to tests/unit/tools/test_transcription_tool.py index 8b54e891b..07368495f 100644 --- a/tests/unit/tools/test_transcription_client.py +++ b/tests/unit/tools/test_transcription_tool.py @@ -3,10 +3,10 @@ import pytest from griptape.artifacts import AudioArtifact -from griptape.tools.audio_transcription_client.tool import AudioTranscriptionClient +from griptape.tools.audio_transcription.tool import AudioTranscriptionTool -class TestTranscriptionClient: +class TestTranscriptionTool: @pytest.fixture() def transcription_engine(self) -> Mock: return Mock() @@ -27,11 +27,11 @@ def mock_path(self, mocker) -> Mock: return mocker def test_init_transcription_client(self, transcription_engine, audio_loader) -> None: - assert AudioTranscriptionClient(engine=transcription_engine, audio_loader=audio_loader) + assert AudioTranscriptionTool(engine=transcription_engine, audio_loader=audio_loader) @patch("builtins.open", mock_open(read_data=b"audio data")) def test_transcribe_audio_from_disk(self, transcription_engine, audio_loader) -> None: - client = AudioTranscriptionClient(engine=transcription_engine, audio_loader=audio_loader) + client = AudioTranscriptionTool(engine=transcription_engine, audio_loader=audio_loader) client.engine.run.return_value = Mock(value="transcription") # pyright: ignore[reportFunctionMemberAccess] text_artifact = client.transcribe_audio_from_disk(params={"values": {"path": "audio.wav"}}) @@ -40,7 +40,7 @@ def test_transcribe_audio_from_disk(self, transcription_engine, audio_loader) -> assert text_artifact.value == "transcription" def test_transcribe_audio_from_memory(self, transcription_engine, audio_loader) -> None: - client = AudioTranscriptionClient(engine=transcription_engine, audio_loader=audio_loader) + client = AudioTranscriptionTool(engine=transcription_engine, audio_loader=audio_loader) memory = Mock() memory.load_artifacts = Mock(return_value=[AudioArtifact(value=b"audio data", format="wav", name="name")]) client.find_input_memory = Mock(return_value=memory) diff --git a/tests/unit/tools/test_variation_image_generation_client.py b/tests/unit/tools/test_variation_image_generation_tool.py similarity index 88% rename from tests/unit/tools/test_variation_image_generation_client.py rename to tests/unit/tools/test_variation_image_generation_tool.py index 0db454f92..c4528a044 100644 --- a/tests/unit/tools/test_variation_image_generation_client.py +++ b/tests/unit/tools/test_variation_image_generation_tool.py @@ -6,10 +6,10 @@ import pytest from griptape.artifacts import ImageArtifact -from griptape.tools import VariationImageGenerationClient +from griptape.tools import VariationImageGenerationTool -class TestVariationImageGenerationClient: +class TestVariationImageGenerationTool: @pytest.fixture() def image_artifact(self) -> ImageArtifact: return ImageArtifact(value=b"image_data", format="png", width=512, height=512, name="name") @@ -26,12 +26,12 @@ def image_loader(self) -> Mock: return loader @pytest.fixture() - def image_generator(self, image_generation_engine, image_loader) -> VariationImageGenerationClient: - return VariationImageGenerationClient(engine=image_generation_engine, 
image_loader=image_loader) + def image_generator(self, image_generation_engine, image_loader) -> VariationImageGenerationTool: + return VariationImageGenerationTool(engine=image_generation_engine, image_loader=image_loader) def test_validate_output_configs(self, image_generation_engine, image_loader) -> None: with pytest.raises(ValueError): - VariationImageGenerationClient( + VariationImageGenerationTool( engine=image_generation_engine, output_dir="test", output_file="test", image_loader=image_loader ) @@ -54,7 +54,7 @@ def test_image_variation(self, image_generator, path_from_resource_path) -> None def test_image_variation_with_outfile(self, image_generation_engine, image_loader, path_from_resource_path) -> None: outfile = f"{tempfile.gettempdir()}/{str(uuid.uuid4())}.png" - image_generator = VariationImageGenerationClient( + image_generator = VariationImageGenerationTool( engine=image_generation_engine, output_file=outfile, image_loader=image_loader ) @@ -76,7 +76,7 @@ def test_image_variation_with_outfile(self, image_generation_engine, image_loade assert os.path.exists(outfile) def test_image_variation_from_memory(self, image_generation_engine, image_artifact): - image_generator = VariationImageGenerationClient(engine=image_generation_engine) + image_generator = VariationImageGenerationTool(engine=image_generation_engine) memory = Mock() memory.load_artifacts = Mock(return_value=[image_artifact]) image_generator.find_input_memory = Mock(return_value=memory) diff --git a/tests/unit/tools/test_vector_store_client.py b/tests/unit/tools/test_vector_store_tool.py similarity index 68% rename from tests/unit/tools/test_vector_store_client.py rename to tests/unit/tools/test_vector_store_tool.py index b02dda226..30596f09f 100644 --- a/tests/unit/tools/test_vector_store_client.py +++ b/tests/unit/tools/test_vector_store_tool.py @@ -1,19 +1,13 @@ -import pytest - from griptape.artifacts import ListArtifact, TextArtifact from griptape.drivers import LocalVectorStoreDriver -from griptape.tools import VectorStoreClient +from griptape.tools import VectorStoreTool from tests.mocks.mock_embedding_driver import MockEmbeddingDriver -class TestVectorStoreClient: - @pytest.fixture(autouse=True) - def _mock_try_run(self, mocker): - mocker.patch("griptape.drivers.OpenAiEmbeddingDriver.try_embed_chunk", return_value=[0, 1]) - +class TestVectorStoreTool: def test_search(self): driver = LocalVectorStoreDriver(embedding_driver=MockEmbeddingDriver()) - tool = VectorStoreClient(description="Test", vector_store_driver=driver) + tool = VectorStoreTool(description="Test", vector_store_driver=driver) driver.upsert_text_artifacts({"test": [TextArtifact("foo"), TextArtifact("bar")]}) @@ -21,8 +15,8 @@ def test_search(self): def test_search_with_namespace(self): driver = LocalVectorStoreDriver(embedding_driver=MockEmbeddingDriver()) - tool1 = VectorStoreClient(description="Test", vector_store_driver=driver, query_params={"namespace": "test"}) - tool2 = VectorStoreClient(description="Test", vector_store_driver=driver, query_params={"namespace": "test2"}) + tool1 = VectorStoreTool(description="Test", vector_store_driver=driver, query_params={"namespace": "test"}) + tool2 = VectorStoreTool(description="Test", vector_store_driver=driver, query_params={"namespace": "test2"}) driver.upsert_text_artifacts({"test": [TextArtifact("foo"), TextArtifact("bar")]}) @@ -31,7 +25,7 @@ def test_search_with_namespace(self): def test_custom_process_query_output_fn(self): driver = 
LocalVectorStoreDriver(embedding_driver=MockEmbeddingDriver()) - tool1 = VectorStoreClient( + tool1 = VectorStoreTool( description="Test", vector_store_driver=driver, process_query_output_fn=lambda es: ListArtifact([e.vector for e in es]), diff --git a/tests/unit/tools/test_web_scraper.py b/tests/unit/tools/test_web_scraper.py index 30362ce65..0fdc761b4 100644 --- a/tests/unit/tools/test_web_scraper.py +++ b/tests/unit/tools/test_web_scraper.py @@ -6,9 +6,9 @@ class TestWebScraper: @pytest.fixture() def scraper(self): - from griptape.tools import WebScraper + from griptape.tools import WebScraperTool - return WebScraper() + return WebScraperTool() def test_get_content(self, scraper): assert isinstance( diff --git a/tests/unit/tools/test_web_search.py b/tests/unit/tools/test_web_search.py index dd447de5d..c1f9555ea 100644 --- a/tests/unit/tools/test_web_search.py +++ b/tests/unit/tools/test_web_search.py @@ -1,7 +1,7 @@ import pytest from griptape.artifacts import BaseArtifact, ErrorArtifact, TextArtifact -from griptape.tools import WebSearch +from griptape.tools import WebSearchTool class TestWebSearch: @@ -11,7 +11,7 @@ def websearch_tool(self, mocker): driver = mocker.Mock() mocker.patch.object(driver, "search", return_value=mock_response) - return WebSearch(web_search_driver=driver) + return WebSearchTool(web_search_driver=driver) @pytest.fixture() def websearch_tool_with_error(self, mocker): @@ -19,12 +19,20 @@ def websearch_tool_with_error(self, mocker): driver = mocker.Mock() mocker.patch.object(driver, "search", side_effect=mock_response) - return WebSearch(web_search_driver=driver) + return WebSearchTool(web_search_driver=driver) def test_search(self, websearch_tool): assert isinstance(websearch_tool.search({"values": {"query": "foo bar"}}), BaseArtifact) assert websearch_tool.search({"values": {"query": "foo bar"}}).value == "test_response" + def test_search_with_params(self, websearch_tool): + assert isinstance( + websearch_tool.search({"values": {"query": "foo bar", "params": {"key": "value"}}}), BaseArtifact + ) + assert ( + websearch_tool.search({"values": {"query": "foo bar", "params": {"key": "value"}}}).value == "test_response" + ) + def test_search_with_error(self, websearch_tool_with_error): assert isinstance(websearch_tool_with_error.search({"values": {"query": "foo bar"}}), ErrorArtifact) assert ( diff --git a/tests/unit/utils/test_chat.py b/tests/unit/utils/test_chat.py index 42ecc59c3..a8ffb1fff 100644 --- a/tests/unit/utils/test_chat.py +++ b/tests/unit/utils/test_chat.py @@ -1,14 +1,15 @@ +import logging +from unittest.mock import patch + +from griptape.configs import Defaults from griptape.memory.structure import ConversationMemory from griptape.structures import Agent from griptape.utils import Chat -from tests.mocks.mock_prompt_driver import MockPromptDriver class TestConversation: def test_init(self): - import logging - - agent = Agent(prompt_driver=MockPromptDriver(), conversation_memory=ConversationMemory()) + agent = Agent(conversation_memory=ConversationMemory()) chat = Chat( agent, @@ -19,6 +20,7 @@ def test_init(self): prompt_prefix="Question: ", response_prefix="Answer: ", output_fn=logging.info, + logger_level=logging.INFO, ) assert chat.structure == agent assert chat.exiting_text == "foo..." 
@@ -27,3 +29,20 @@ def test_init(self): assert chat.prompt_prefix == "Question: " assert chat.response_prefix == "Answer: " assert callable(chat.output_fn) + assert chat.logger_level == logging.INFO + + @patch("builtins.input", side_effect=["exit"]) + def test_chat_logger_level(self, mock_input): + agent = Agent(conversation_memory=ConversationMemory()) + + chat = Chat(agent) + + logger = logging.getLogger(Defaults.logging_config.logger_name) + logger.setLevel(logging.DEBUG) + + assert logger.getEffectiveLevel() == logging.DEBUG + + chat.start() + + assert logger.getEffectiveLevel() == logging.DEBUG + assert mock_input.call_count == 1 diff --git a/tests/unit/utils/test_conversation.py b/tests/unit/utils/test_conversation.py index 28ee72409..a07d15cdb 100644 --- a/tests/unit/utils/test_conversation.py +++ b/tests/unit/utils/test_conversation.py @@ -2,12 +2,11 @@ from griptape.structures import Pipeline from griptape.tasks import PromptTask from griptape.utils import Conversation -from tests.mocks.mock_prompt_driver import MockPromptDriver class TestConversation: def test_lines(self): - pipeline = Pipeline(prompt_driver=MockPromptDriver(), conversation_memory=ConversationMemory()) + pipeline = Pipeline(conversation_memory=ConversationMemory()) pipeline.add_tasks(PromptTask("question 1")) @@ -22,7 +21,7 @@ def test_lines(self): assert lines[3] == "A: mock output" def test_prompt_stack_conversation_memory(self): - pipeline = Pipeline(prompt_driver=MockPromptDriver(), conversation_memory=ConversationMemory()) + pipeline = Pipeline(conversation_memory=ConversationMemory()) pipeline.add_tasks(PromptTask("question 1")) @@ -36,8 +35,7 @@ def test_prompt_stack_conversation_memory(self): def test_prompt_stack_summary_conversation_memory(self): pipeline = Pipeline( - prompt_driver=MockPromptDriver(), - conversation_memory=SummaryConversationMemory(summary="foobar", prompt_driver=MockPromptDriver()), + conversation_memory=SummaryConversationMemory(summary="foobar"), ) pipeline.add_tasks(PromptTask("question 1")) @@ -52,7 +50,7 @@ def test_prompt_stack_summary_conversation_memory(self): assert lines[2] == "assistant: mock output" def test___str__(self): - pipeline = Pipeline(prompt_driver=MockPromptDriver(), conversation_memory=ConversationMemory()) + pipeline = Pipeline(conversation_memory=ConversationMemory()) pipeline.add_tasks(PromptTask("question 1")) diff --git a/tests/unit/utils/test_file_utils.py b/tests/unit/utils/test_file_utils.py index a9c122126..00df6958d 100644 --- a/tests/unit/utils/test_file_utils.py +++ b/tests/unit/utils/test_file_utils.py @@ -3,7 +3,6 @@ from griptape import utils from griptape.loaders import TextLoader -from tests.mocks.mock_embedding_driver import MockEmbeddingDriver MAX_TOKENS = 50 @@ -32,7 +31,7 @@ def test_load_files(self): def test_load_file_with_loader(self): dirname = os.path.dirname(__file__) file = utils.load_file(os.path.join(dirname, "../../", "resources/foobar-many.txt")) - artifacts = TextLoader(max_tokens=MAX_TOKENS, embedding_driver=MockEmbeddingDriver()).load(file) + artifacts = TextLoader(max_tokens=MAX_TOKENS).load(file) assert len(artifacts) == 39 assert isinstance(artifacts, list) @@ -43,7 +42,7 @@ def test_load_files_with_loader(self): sources = ["resources/foobar-many.txt"] sources = [os.path.join(dirname, "../../", source) for source in sources] files = utils.load_files(sources) - loader = TextLoader(max_tokens=MAX_TOKENS, embedding_driver=MockEmbeddingDriver()) + loader = TextLoader(max_tokens=MAX_TOKENS) collection = 
loader.load_collection(list(files.values())) test_file_artifacts = collection[loader.to_key(files[utils.str_to_hash(sources[0])])] diff --git a/tests/unit/utils/test_futures.py b/tests/unit/utils/test_futures.py index 04ddb9877..c34124c76 100644 --- a/tests/unit/utils/test_futures.py +++ b/tests/unit/utils/test_futures.py @@ -19,8 +19,21 @@ def test_execute_futures_list(self): [executor.submit(self.foobar, "foo"), executor.submit(self.foobar, "baz")] ) - assert result[0] == "foo-bar" - assert result[1] == "baz-bar" + assert set(result) == {"foo-bar", "baz-bar"} + + def test_execute_futures_list_dict(self): + with futures.ThreadPoolExecutor() as executor: + result = utils.execute_futures_list_dict( + { + "test1": [executor.submit(self.foobar, f"foo-{i}") for i in range(0, 1000)], + "test2": [executor.submit(self.foobar, f"foo-{i}") for i in range(0, 1000)], + "test3": [executor.submit(self.foobar, f"foo-{i}") for i in range(0, 1000)], + } + ) + + assert len(result["test1"]) == 1000 + assert len(result["test2"]) == 1000 + assert len(result["test3"]) == 1000 def foobar(self, foo): return f"{foo}-bar" diff --git a/tests/unit/utils/test_stream.py b/tests/unit/utils/test_stream.py index da6695139..caddbb1a3 100644 --- a/tests/unit/utils/test_stream.py +++ b/tests/unit/utils/test_stream.py @@ -2,18 +2,17 @@ import pytest -from griptape.structures import Agent +from griptape.structures import Agent, Pipeline from griptape.utils import Stream -from tests.mocks.mock_prompt_driver import MockPromptDriver class TestStream: @pytest.fixture(params=[True, False]) def agent(self, request): - return Agent(prompt_driver=MockPromptDriver(stream=request.param, max_attempts=0)) + return Agent(stream=request.param) def test_init(self, agent): - if agent.prompt_driver.stream: + if agent.stream: chat_stream = Stream(agent) assert chat_stream.structure == agent @@ -28,3 +27,9 @@ def test_init(self, agent): else: with pytest.raises(ValueError): Stream(agent) + + def test_validate_structure_invalid(self): + pipeline = Pipeline(tasks=[]) + + with pytest.raises(ValueError): + Stream(pipeline) diff --git a/tests/unit/utils/test_structure_visualizer.py b/tests/unit/utils/test_structure_visualizer.py index f6e621b91..8a055cb21 100644 --- a/tests/unit/utils/test_structure_visualizer.py +++ b/tests/unit/utils/test_structure_visualizer.py @@ -1,12 +1,11 @@ from griptape.structures import Agent, Pipeline, Workflow from griptape.tasks import PromptTask from griptape.utils import StructureVisualizer -from tests.mocks.mock_prompt_driver import MockPromptDriver class TestStructureVisualizer: def test_agent(self): - agent = Agent(prompt_driver=MockPromptDriver(), tasks=[PromptTask("test1", id="task1")]) + agent = Agent(tasks=[PromptTask("test1", id="task1")]) visualizer = StructureVisualizer(agent) result = visualizer.to_url() @@ -15,7 +14,6 @@ def test_agent(self): def test_pipeline(self): pipeline = Pipeline( - prompt_driver=MockPromptDriver(), tasks=[ PromptTask("test1", id="task1"), PromptTask("test2", id="task2"), @@ -34,7 +32,6 @@ def test_pipeline(self): def test_workflow(self): workflow = Workflow( - prompt_driver=MockPromptDriver(), tasks=[ PromptTask("test1", id="task1"), PromptTask("test2", id="task2", parent_ids=["task1"]), diff --git a/tests/utils/code_blocks.py b/tests/utils/code_blocks.py deleted file mode 100644 index ca5b193d1..000000000 --- a/tests/utils/code_blocks.py +++ /dev/null @@ -1,83 +0,0 @@ -from __future__ import annotations - -import logging -import pathlib -import textwrap - -# Adapted from 
https://github.com/koaning/mktestdocs - - -def check_py_string(source: str) -> None: - """Exec the python source given in a new module namespace. - - Does not return anything, but exceptions raised by the source - will propagate out unmodified - """ - try: - exec(source, {"__MODULE__": "__main__"}) - except Exception: - logging.info(source) - raise - - -def check_code_block(block: str, lang: str = "python") -> str: - """Cleans the found codeblock and checks if the proglang is correct. - - Returns an empty string if the codeblock is deemed invalid. - - Args: - block: the code block to analyse - lang: if not None, the language that is assigned to the codeblock - """ - first_line = block.split("\n")[0] - if lang: - line_elements = first_line[3:].split(" ", 2) - if len(line_elements) == 1: - block_lang = line_elements[0] - title_value = None - elif len(line_elements) == 2: - block_lang, title = line_elements - title_elements = title.replace(" ", "").split("=", 2) - if len(title_elements) == 2: - _, title_value = title_elements - else: - title_value = None - else: - block_lang = None - title_value = None - - if block_lang != lang: - return "" - if title_value == '"PYTEST_IGNORE"': - return "" - return "\n".join(block.split("\n")[1:]) - - -def get_code_blocks(docstring: str, lang: str = "python") -> list[str]: - """Given a docstring, grab all the markdown codeblocks found in docstring. - - Args: - docstring: the docstring to analyse - lang: if not None, the language that is assigned to the codeblock - """ - docstring = textwrap.dedent(docstring) - in_block = False - block = "" - codeblocks = [] - for line in docstring.split("\n"): - if line.startswith("```"): - if in_block: - codeblocks.append(check_code_block(block, lang=lang)) - block = "" - in_block = not in_block - if in_block: - block += line + "\n" - return [c for c in codeblocks if c != ""] - - -def get_all_code_blocks(path: str) -> list[dict]: - return [ - {"id": f"{str(fpath)}-{block_num + 1}", "code": code_block} - for fpath in pathlib.Path().glob(path) - for block_num, code_block in enumerate(get_code_blocks(fpath.read_text())) - ] diff --git a/tests/utils/defaults.py b/tests/utils/defaults.py index bad7f0d79..0e26225a9 100644 --- a/tests/utils/defaults.py +++ b/tests/utils/defaults.py @@ -1,25 +1,18 @@ from griptape.artifacts import BlobArtifact, TextArtifact from griptape.drivers import LocalVectorStoreDriver -from griptape.engines import CsvExtractionEngine, JsonExtractionEngine, PromptSummaryEngine from griptape.engines.rag import RagEngine from griptape.engines.rag.modules import PromptResponseRagModule, VectorStoreRetrievalRagModule from griptape.engines.rag.stages import ResponseRagStage, RetrievalRagStage from griptape.memory import TaskMemory from griptape.memory.task.storage import BlobArtifactStorage, TextArtifactStorage from tests.mocks.mock_embedding_driver import MockEmbeddingDriver -from tests.mocks.mock_prompt_driver import MockPromptDriver def text_tool_artifact_storage(): vector_store_driver = LocalVectorStoreDriver(embedding_driver=MockEmbeddingDriver()) return TextArtifactStorage( - rag_engine=rag_engine(MockPromptDriver(), vector_store_driver), vector_store_driver=vector_store_driver, - retrieval_rag_module_name="VectorStoreRetrievalRagModule", - summary_engine=PromptSummaryEngine(prompt_driver=MockPromptDriver()), - csv_extraction_engine=CsvExtractionEngine(prompt_driver=MockPromptDriver()), - json_extraction_engine=JsonExtractionEngine(prompt_driver=MockPromptDriver()), ) @@ -34,5 +27,5 @@ def 
rag_engine(prompt_driver, vector_store_driver): retrieval_stage=RetrievalRagStage( retrieval_modules=[VectorStoreRetrievalRagModule(vector_store_driver=vector_store_driver)] ), - response_stage=ResponseRagStage(response_module=PromptResponseRagModule(prompt_driver=prompt_driver)), + response_stage=ResponseRagStage(response_modules=[PromptResponseRagModule(prompt_driver=prompt_driver)]), ) diff --git a/tests/utils/structure_tester.py b/tests/utils/structure_tester.py index 5b908065b..9fadf4e36 100644 --- a/tests/utils/structure_tester.py +++ b/tests/utils/structure_tester.py @@ -228,6 +228,15 @@ def prompt_driver_id_fn(cls, prompt_driver) -> str: return f"{prompt_driver.__class__.__name__}-{prompt_driver.model}" def verify_structure_output(self, structure) -> dict: + from griptape.configs import Defaults + + Defaults.drivers_config.prompt_driver = AzureOpenAiChatPromptDriver( + api_key=os.environ["AZURE_OPENAI_API_KEY_1"], + model="gpt-4o", + azure_deployment=os.environ["AZURE_OPENAI_4_DEPLOYMENT_ID"], + azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT_1"], + response_format="json_object", + ) output_schema = Schema( { Literal("correct", description="Whether the output was correct or not."): bool, @@ -265,13 +274,6 @@ def verify_structure_output(self, structure) -> dict: ], ), ], - prompt_driver=AzureOpenAiChatPromptDriver( - api_key=os.environ["AZURE_OPENAI_API_KEY_1"], - model="gpt-4o", - azure_deployment=os.environ["AZURE_OPENAI_4_DEPLOYMENT_ID"], - azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT_1"], - response_format="json_object", - ), tasks=[ PromptTask( "\nTasks: {{ task_names }}" diff --git a/tests/utils/test_reference_utils.py b/tests/utils/test_reference_utils.py index c3491f5d0..47da18713 100644 --- a/tests/utils/test_reference_utils.py +++ b/tests/utils/test_reference_utils.py @@ -1,12 +1,11 @@ from griptape.artifacts import TextArtifact from griptape.common import Reference from griptape.engines.rag.modules import PromptResponseRagModule -from tests.mocks.mock_prompt_driver import MockPromptDriver class TestReferenceUtils: def test_references_from_artifacts(self): - module = PromptResponseRagModule(prompt_driver=MockPromptDriver()) + module = PromptResponseRagModule() reference1 = Reference(title="foo") reference2 = Reference(title="bar") artifacts = [ From b1265ec88610e980d0f0b6716d7b9e55db12a306 Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Wed, 21 Aug 2024 15:56:48 -0700 Subject: [PATCH 5/9] Hotfix/extraction docs (#1096) --- CHANGELOG.md | 8 ++++++++ .../engines/src/extraction_engines_1.py | 4 ++-- .../engines/src/extraction_engines_2.py | 11 ++++++----- docs/griptape-framework/structures/src/tasks_6.py | 7 +++---- docs/griptape-framework/structures/src/tasks_7.py | 3 +-- griptape/engines/extraction/csv_extraction_engine.py | 1 + 6 files changed, 21 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 21f72fe50..525817db9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +## [0.30.1] - 2024-08-21 + +### Fixed +- `CsvExtractionEngine` not using provided `Ruleset`s. +- Docs examples for Extraction Engines not properly passing in schemas. + +## [0.30.0] - 2024-08-20 + ### Added - `AstraDbVectorStoreDriver` to support DataStax Astra DB as a vector store. - Ability to set custom schema properties on Tool Activities via `extra_schema_properties`. 
diff --git a/docs/griptape-framework/engines/src/extraction_engines_1.py b/docs/griptape-framework/engines/src/extraction_engines_1.py index c681980f2..17644ebf2 100644 --- a/docs/griptape-framework/engines/src/extraction_engines_1.py +++ b/docs/griptape-framework/engines/src/extraction_engines_1.py @@ -4,7 +4,7 @@ # Initialize the CsvExtractionEngine instance csv_engine = CsvExtractionEngine( - prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo"), + prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo"), column_names=["name", "age", "location"] ) # Define some unstructured data @@ -15,7 +15,7 @@ """ # Extract CSV rows using the engine -result = csv_engine.extract(sample_text, column_names=["name", "age", "location"]) +result = csv_engine.extract(sample_text) if isinstance(result, ListArtifact): for row in result.value: diff --git a/docs/griptape-framework/engines/src/extraction_engines_2.py b/docs/griptape-framework/engines/src/extraction_engines_2.py index d47bb48e5..a100754b3 100644 --- a/docs/griptape-framework/engines/src/extraction_engines_2.py +++ b/docs/griptape-framework/engines/src/extraction_engines_2.py @@ -4,8 +4,12 @@ from griptape.drivers import OpenAiChatPromptDriver from griptape.engines import JsonExtractionEngine +# Define a schema for extraction +user_schema = Schema({"users": [{"name": str, "age": int, "location": str}]}).json_schema("UserSchema") + + json_engine = JsonExtractionEngine( - prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo"), + prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo"), template_schema=user_schema ) # Define some unstructured data @@ -14,11 +18,8 @@ Bob (Age 35) lives in California. """ -# Define a schema for extraction -user_schema = Schema({"users": [{"name": str, "age": int, "location": str}]}).json_schema("UserSchema") - # Extract data using the engine -result = json_engine.extract(sample_json_text, template_schema=user_schema) +result = json_engine.extract(sample_json_text) if isinstance(result, ListArtifact): for artifact in result.value: diff --git a/docs/griptape-framework/structures/src/tasks_6.py b/docs/griptape-framework/structures/src/tasks_6.py index a1b84e44d..ecd6f354f 100644 --- a/docs/griptape-framework/structures/src/tasks_6.py +++ b/docs/griptape-framework/structures/src/tasks_6.py @@ -4,7 +4,9 @@ from griptape.tasks import ExtractionTask # Instantiate the CSV extraction engine -csv_extraction_engine = CsvExtractionEngine(prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo")) +csv_extraction_engine = CsvExtractionEngine( + prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo"), column_names=["Name", "Age", "Address"] +) # Define some unstructured data and columns csv_data = """ @@ -13,15 +15,12 @@ Charlie is 40 and lives in Texas. 
""" -columns = ["Name", "Age", "Address"] - # Create an agent and add the ExtractionTask to it agent = Agent() agent.add_task( ExtractionTask( extraction_engine=csv_extraction_engine, - args={"column_names": columns}, ) ) diff --git a/docs/griptape-framework/structures/src/tasks_7.py b/docs/griptape-framework/structures/src/tasks_7.py index 909d00084..da5deda88 100644 --- a/docs/griptape-framework/structures/src/tasks_7.py +++ b/docs/griptape-framework/structures/src/tasks_7.py @@ -8,6 +8,7 @@ # Instantiate the json extraction engine json_extraction_engine = JsonExtractionEngine( prompt_driver=OpenAiChatPromptDriver(model="gpt-3.5-turbo"), + template_schema=Schema({"users": [{"name": str, "age": int, "location": str}]}).json_schema("UserSchema"), ) # Define some unstructured data and a schema @@ -15,13 +16,11 @@ Alice (Age 28) lives in New York. Bob (Age 35) lives in California. """ -user_schema = Schema({"users": [{"name": str, "age": int, "location": str}]}).json_schema("UserSchema") agent = Agent() agent.add_task( ExtractionTask( extraction_engine=json_extraction_engine, - args={"template_schema": user_schema}, ) ) diff --git a/griptape/engines/extraction/csv_extraction_engine.py b/griptape/engines/extraction/csv_extraction_engine.py index c9c040f65..6f7637476 100644 --- a/griptape/engines/extraction/csv_extraction_engine.py +++ b/griptape/engines/extraction/csv_extraction_engine.py @@ -33,6 +33,7 @@ def extract( self._extract_rec( cast(list[TextArtifact], text.value) if isinstance(text, ListArtifact) else [TextArtifact(text)], [], + rulesets=rulesets, ), item_separator="\n", ) From d576619260595e6cef2e974666441b7b757859f8 Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Wed, 21 Aug 2024 16:20:38 -0700 Subject: [PATCH 6/9] Version bump v0.30.1 (#1098) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 6c50013ad..d812bbe44 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "griptape" -version = "0.30.0" +version = "0.30.1" description = "Modular Python framework for LLM workflows, tools, memory, and data." authors = ["Griptape "] license = "Apache 2.0" From ad500c1132f3d3b81e4a89690389048965cb15d7 Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Mon, 26 Aug 2024 12:30:23 -0700 Subject: [PATCH 7/9] Hotfix/event lock (#1104) --- CHANGELOG.md | 5 +++++ .../event_listener/base_event_listener_driver.py | 11 +++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 525817db9..2b2dafbc8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +## [0.30.2] - 2024-08-26 + +### Fixed +- Ensure thread safety when publishing events by adding a thread lock to batch operations in `BaseEventListenerDriver`. 
+ ## [0.30.1] - 2024-08-21 ### Fixed diff --git a/griptape/drivers/event_listener/base_event_listener_driver.py b/griptape/drivers/event_listener/base_event_listener_driver.py index 0af57f0f3..75bdc9f75 100644 --- a/griptape/drivers/event_listener/base_event_listener_driver.py +++ b/griptape/drivers/event_listener/base_event_listener_driver.py @@ -1,6 +1,7 @@ from __future__ import annotations import logging +import threading from abc import ABC, abstractmethod from typing import TYPE_CHECKING @@ -18,6 +19,7 @@ class BaseEventListenerDriver(FuturesExecutorMixin, ABC): batched: bool = field(default=True, kw_only=True) batch_size: int = field(default=10, kw_only=True) + thread_lock: threading.Lock = field(default=Factory(lambda: threading.Lock())) _batch: list[dict] = field(default=Factory(list), kw_only=True) @@ -39,10 +41,11 @@ def _safe_try_publish_event(self, event: BaseEvent | dict, *, flush: bool) -> No event_payload = event if isinstance(event, dict) else event.to_dict() if self.batched: - self._batch.append(event_payload) - if len(self.batch) >= self.batch_size or flush: - self.try_publish_event_payload_batch(self.batch) - self._batch = [] + with self.thread_lock: + self._batch.append(event_payload) + if len(self.batch) >= self.batch_size or flush: + self.try_publish_event_payload_batch(self.batch) + self._batch = [] return else: self.try_publish_event_payload(event_payload) From 1d1f0c0ee8a39bc070d5fad3a78f441c9c11e756 Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Mon, 26 Aug 2024 13:00:19 -0700 Subject: [PATCH 8/9] Fix saving results of csv extraction (#1105) --- CHANGELOG.md | 1 + griptape/tools/file_manager/tool.py | 3 ++- tests/unit/tools/test_file_manager.py | 24 ++++++++++++++++++++++++ 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2b2dafbc8..0f6cb4d62 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed - Ensure thread safety when publishing events by adding a thread lock to batch operations in `BaseEventListenerDriver`. +- `FileManagerTool` failing to save Artifacts created by `ExtractionTool` with a `CsvExtractionEngine`. 
## [0.30.1] - 2024-08-21 diff --git a/griptape/tools/file_manager/tool.py b/griptape/tools/file_manager/tool.py index ece6a0e92..04a9bbc41 100644 --- a/griptape/tools/file_manager/tool.py +++ b/griptape/tools/file_manager/tool.py @@ -93,7 +93,8 @@ def save_memory_artifacts_to_disk(self, params: dict) -> ErrorArtifact | InfoArt for artifact in list_artifact.value: formatted_file_name = f"{artifact.name}-{file_name}" if len(list_artifact) > 1 else file_name - result = self.file_manager_driver.save_file(os.path.join(dir_name, formatted_file_name), artifact.value) + value = artifact.value if isinstance(artifact.value, (str, bytes)) else artifact.to_text() + result = self.file_manager_driver.save_file(os.path.join(dir_name, formatted_file_name), value) if isinstance(result, ErrorArtifact): return result diff --git a/tests/unit/tools/test_file_manager.py b/tests/unit/tools/test_file_manager.py index dccf2f1a2..e37a7f31d 100644 --- a/tests/unit/tools/test_file_manager.py +++ b/tests/unit/tools/test_file_manager.py @@ -6,6 +6,7 @@ import pytest from griptape.artifacts import ListArtifact, TextArtifact +from griptape.artifacts.csv_row_artifact import CsvRowArtifact from griptape.artifacts.error_artifact import ErrorArtifact from griptape.drivers.file_manager.local_file_manager_driver import LocalFileManagerDriver from griptape.loaders.text_loader import TextLoader @@ -108,6 +109,29 @@ def test_save_memory_artifacts_to_disk_for_multiple_artifacts(self, temp_dir): assert Path(os.path.join(temp_dir, "test", f"{artifacts[1].name}-{file_name}")).read_text() == "baz" assert result.value == "Successfully saved memory artifacts to disk" + def test_save_memory_artifacts_to_disk_for_non_string_artifact(self, temp_dir): + memory = defaults.text_task_memory("Memory1") + artifact = CsvRowArtifact({"foo": "bar"}) + + memory.store_artifact("foobar", artifact) + + file_manager = FileManagerTool( + input_memory=[memory], file_manager_driver=LocalFileManagerDriver(workdir=temp_dir) + ) + result = file_manager.save_memory_artifacts_to_disk( + { + "values": { + "dir_name": "test", + "file_name": "foobar.txt", + "memory_name": memory.name, + "artifact_namespace": "foobar", + } + } + ) + + assert Path(os.path.join(temp_dir, "test", "foobar.txt")).read_text() == "bar" + assert result.value == "Successfully saved memory artifacts to disk" + def test_save_content_to_file(self, temp_dir): file_manager = FileManagerTool(file_manager_driver=LocalFileManagerDriver(workdir=temp_dir)) result = file_manager.save_content_to_file( From 457259de683f2fe18fcb1d94d75c858b3f6c474d Mon Sep 17 00:00:00 2001 From: Collin Dutter Date: Mon, 26 Aug 2024 14:25:00 -0700 Subject: [PATCH 9/9] Version bump v0.30.2 (#1107) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index d812bbe44..880a7a6ec 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "griptape" -version = "0.30.1" +version = "0.30.2" description = "Modular Python framework for LLM workflows, tools, memory, and data." authors = ["Griptape "] license = "Apache 2.0"
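
The event-lock fix in patch 7/9 works by moving both the append to `_batch` and the flush check inside one `threading.Lock`, so concurrent publishers (for example, parallel `Workflow` tasks emitting events) cannot interleave between the size check and `try_publish_event_payload_batch`. The following standalone sketch shows the same locked-batch pattern in isolation; it is not part of any patch, and the `BatchingPublisher` class with its print-based delivery is illustrative only.

import threading


class BatchingPublisher:
    # Minimal stand-in for the batching logic in BaseEventListenerDriver.
    def __init__(self, batch_size: int = 10) -> None:
        self.batch_size = batch_size
        self._batch: list[dict] = []
        self._lock = threading.Lock()

    def publish(self, event_payload: dict, *, flush: bool = False) -> None:
        # Append and (possibly) flush under one lock so the batch cannot change
        # size between the length check and the publish call.
        with self._lock:
            self._batch.append(event_payload)
            if len(self._batch) >= self.batch_size or flush:
                self._publish_batch(self._batch)
                self._batch = []

    def _publish_batch(self, batch: list[dict]) -> None:
        # Stand-in for try_publish_event_payload_batch in the real driver.
        print(f"published {len(batch)} events")

In the real driver, `try_publish_event_payload_batch` plays the role of `_publish_batch`, and the equivalent `with self.thread_lock:` block wraps the append and the flush inside `_safe_try_publish_event`.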