diff --git a/.gitignore b/.gitignore index be8fea9..37489fb 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,13 @@ __pycache__ # Exclude `databricks sync` CLI command snapshots .databricks +openai_sdk_agent_app_sample_code/configs/*/*.yaml +openai_sdk_agent_app_sample_code/configs/*.yaml +dist/ +mlruns/ +_scratch_pad/ +openai_sdk_agent_app_sample_code/_scratch_pad/ +.vscode/ *.png poetry.lock \ No newline at end of file diff --git a/openai_sdk_agent_app_sample_code/01_data_pipeline.ipynb b/openai_sdk_agent_app_sample_code/01_data_pipeline.ipynb new file mode 100644 index 0000000..0587393 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/01_data_pipeline.ipynb @@ -0,0 +1,947 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "7c756f50-2063-4a07-b964-e5d6de29abb4", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "# Unstructured data pipeline for the Agent's Retriever\n", + "\n", + "By the end of this notebook, you will have transformed your unstructured documents into a vector index that can be queried by your Agent.\n", + "\n", + "This means:\n", + "- Documents loaded into a delta table.\n", + "- Documents are chunked.\n", + "- Chunks have been embedded with an embedding model and stored in a vector index.\n", + "\n", + "The important resulting artifact of this notebook is the chunked vector index. This will be used in the next notebook to power our Retriever." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "d3777205-4dfe-418c-9d21-c67961a18070", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "### 👉 START HERE: How to Use This Notebook\n", + "\n", + "Follow these steps to build and refine your data pipeline's quality:\n", + "\n", + "1. **Build a v0 index with default settings**\n", + " - Configure the data source and destination tables in the `1️⃣ 📂 Data source & destination configuration` cells\n", + " - Press `Run All` to create the vector index.\n", + "\n", + " *Note: While you can adjust the other settings and modify the parsing/chunking code, we suggest doing so only after evaluating your Agent's quality so you can make improvements that specifically address root causes of quality issues.*\n", + "\n", + "2. **Use later notebooks to integrate the retriever into an the agent and evaluate the agent/retriever's quality.**\n", + "\n", + "3. 
**If the evaluation results show retrieval issues as a root cause, use this notebook to iterate on your data pipeline's code & config.** Below are some potential fixes you can try, see the AI Cookbook's [debugging retrieval issues](https://ai-cookbook.io/nbs/5-hands-on-improve-quality-step-1-retrieval.html) section for details.**\n", + " - Add missing, but relevant source documents into in the index.\n", + " - Resolve any conflicting information in source documents.\n", + " - Adjust the data pipeline configuration:\n", + " - Modify chunk size or overlap.\n", + " - Experiment with different embedding models.\n", + " - Adjust the data pipeline code:\n", + " - Create a custom parser or use different parsing libraries.\n", + " - Develop a custom chunker or use different chunking techniques.\n", + " - Extract additional metadata for each document.\n", + " - Adjust the Agent's code/config in subsequent notebooks:\n", + " - Change the number of documents retrieved (K).\n", + " - Try a re-ranker.\n", + " - Use hybrid search.\n", + " - Apply extracted metadata as filters.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "1a6053b9-3135-4097-9ed0-64bdb03a6b9f", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "**Important note:** Throughout this notebook, we indicate which cells you:\n", + "- ✅✏️ *should* customize - these cells contain code & config with business logic that you should edit to meet your requirements & tune quality\n", + "- 🚫✏️ *typically will not* customize - these cells contain boilerplate code required to execute the pipeline\n", + "\n", + "*Cells that don't require customization still need to be run! You CAN change these cells, but if this is the first time using this notebook, we suggest not doing so.*" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "16b35cfd-7c99-4419-8978-33939faf24a6", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "### Install Python libraries (Databricks Notebook only)\n", + "\n", + "🚫✏️ Only modify if you need additional packages in your code changes to the document parsing or chunking logic.\n", + "\n", + "Versions of Databricks code are not locked since Databricks ensures changes are backwards compatible.\n", + "Versions of open source packages are locked since package authors often make backwards compatible changes" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "6b4eebb3-448a-4236-99fb-19e44858e3c6", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "# %pip install -qqqq -U -r requirements.txt\n", + "# %pip install -qqqq -U -r requirements_datapipeline.txt\n", + "# dbutils.library.restartPython()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Connect to Databricks (Local IDE only)\n", + "\n", + "If running from an IDE with [`databricks-connect`](https://docs.databricks.com/en/dev-tools/databricks-connect/python/index.html), connect to a Spark session & install the necessary packages on that cluster." 
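+ , + "\n", + "Once connected, a quick sanity check like the one below (a minimal sketch; it assumes the `spark` session created in the next cell) confirms the cluster is reachable:\n", + "```python\n", + "# Assumes `spark` was created via DatabricksSession.builder.getOrCreate() in the next cell.\n", + "print(spark.range(1).count())  # should print 1 if the connection works\n", + "```"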
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "from cookbook.databricks_utils import get_cluster_url\n", + "from cookbook.databricks_utils import get_active_cluster_id\n", + "from cookbook.databricks_utils.install_cluster_library import install_requirements\n", + "\n", + "# UNCOMMENT TO INSTALL PACKAGES ON THE ACTIVE CLUSTER; this is code that is not super battle tested.\n", + "# cluster_id = get_active_cluster_id()\n", + "# print(f\"Installing packages on the active cluster: {get_cluster_url(cluster_id)}\")\n", + "\n", + "\n", + "# install_requirements(cluster_id, \"requirements.txt\")\n", + "# install_requirements(cluster_id, \"requirements_datapipeline.txt\")\n", + "\n", + "# THIS MUST BE DONE MANUALLY! TODO: Automate it.\n", + "# - Go to openai_sdk_agent_app_sample_code/\n", + "# - Run `poetry build`\n", + "# - Copy the wheel file to a UC Volume or Workspace folder\n", + "# - Go to the cluster's Libraries page and install the wheel file as a new library\n", + "\n", + "# Get Spark session if using Databricks Connect from an IDE\n", + "from mlflow.utils import databricks_utils as du\n", + "\n", + "if not du.is_in_databricks_notebook():\n", + " from databricks.connect import DatabricksSession\n", + "\n", + " spark = DatabricksSession.builder.getOrCreate()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "7a27dc10-44ae-4489-bc75-0d61c89b4268", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "\n", + "## 1️⃣ 📂 Data source & destination configuration" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "cf8bd6ab-827e-4ba6-805f-091349906ef6", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "#### ✅✏️ Configure the data pipeline's source location.\n", + "\n", + "Choose a [Unity Catalog Volume](https://docs.databricks.com/en/volumes/index.html) containing PDF, HTML, etc documents to be parsed/chunked/embedded.\n", + "\n", + "- `uc_catalog_name`: Name of the Unity Catalog.\n", + "- `uc_schema_name`: Name of the Unity Catalog schema.\n", + "- `uc_volume_name`: Name of the Unity Catalog volume.\n", + "\n", + "Running this cell with validate that the UC Volume exists, trying to create it if not.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "59b3efc5-0591-4a44-b88d-184003cabfb6", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "from cookbook.config.data_pipeline.uc_volume_source import UCVolumeSourceConfig\n", + "\n", + "# Configure the UC Volume that contains the source documents\n", + "source_config = UCVolumeSourceConfig(\n", + " # uc_catalog_name=\"REPLACE_ME\", # REPLACE_ME\n", + " # uc_schema_name=\"REPLACE_ME\", # REPLACE_ME\n", + " # uc_volume_name=f\"REPLACE_ME\", # REPLACE_ME\n", + " uc_catalog_name=\"ep\", # REPLACE_ME\n", + " uc_schema_name=\"cookbook_local_test\", # REPLACE_ME\n", + " uc_volume_name=f\"product_docs\", # REPLACE_ME\n", + ")\n", + "\n", + "# Check if volume exists, create otherwise\n", + "is_valid, msg = source_config.create_or_validate_volume()\n", + "if not is_valid:\n", + " raise Exception(msg)" + ] + }, + { + "cell_type": 
"markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "083e203f-e468-4ce7-b645-31507a36c86b", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "#### ✅✏️ Configure the data pipeline's output location.\n", + " \n", + "Choose where the data pipeline outputs the parsed, chunked, and embedded documents.\n", + "\n", + "Required parameters:\n", + "* `uc_catalog_name`: Unity Catalog name where tables will be created\n", + "* `uc_schema_name`: Schema name within the catalog \n", + "* `base_table_name`: Core name used as prefix for all generated tables\n", + "* `vector_search_endpoint`: Vector Search endpoint to store the index\n", + "\n", + "Optional parameters:\n", + "* `docs_table_postfix`: Suffix for the parsed documents table (default: \"docs\")\n", + "* `chunked_table_postfix`: Suffix for the chunked documents table (default: \"docs_chunked\") \n", + "* `vector_index_postfix`: Suffix for the vector index (default: \"docs_chunked_index\")\n", + "* `version_suffix`: Version identifier (e.g. 'v1', 'test') to maintain multiple versions\n", + "\n", + "The generated tables follow this naming convention:\n", + "* Parsed docs: {uc_catalog_name}.{uc_schema_name}.{base_table_name}_{docs_table_postfix}__{version_suffix}\n", + "* Chunked docs: {uc_catalog_name}.{uc_schema_name}.{base_table_name}_{chunked_table_postfix}__{version_suffix}\n", + "* Vector index: {uc_catalog_name}.{uc_schema_name}.{base_table_name}_{vector_index_postfix}__{version_suffix}\n", + "\n", + "*Note: If you are comparing different chunking/parsing/embedding strategies, set the `version_suffix` parameter to maintain multiple versions of the pipeline output with the same base_table_name.*\n", + "\n", + "*Databricks suggests sharing a Vector Search endpoint across multiple agents.*" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from cookbook.config.data_pipeline.data_pipeline_output import DataPipelineOuputConfig\n", + "\n", + "# Output configuration\n", + "output_config = DataPipelineOuputConfig(\n", + " # Required parameters\n", + " uc_catalog_name=source_config.uc_catalog_name, # usually same as source volume catalog, by default is the same as the source volume catalog\n", + " uc_schema_name=source_config.uc_schema_name, # usually same as source volume schema, by default is the same as the source volume schema\n", + " #base_table_name=source_config.uc_volume_name, # usually similar / same as the source volume name; by default, is the same as the volume_name\n", + " base_table_name=\"test_product_docs\", # usually similar / same as the source volume name; by default, is the same as the volume_name\n", + " # vector_search_endpoint=\"REPLACE_ME\", # Vector Search endpoint to store the index\n", + " vector_search_endpoint=\"ericpeter_vector_search\", # Vector Search endpoint to store the index\n", + "\n", + " # Optional parameters, showing defaults\n", + " docs_table_postfix=\"docs\", # default value is `docs`\n", + " chunked_table_postfix=\"docs_chunked\", # default value is `docs_chunked`\n", + " vector_index_postfix=\"docs_chunked_index\", # default value is `docs_chunked_index`\n", + " version_suffix=\"v2\" # default is None\n", + "\n", + " # Output tables / indexes follow this naming convention:\n", + " # {uc_catalog_name}.{uc_schema_name}.{base_table_name}_{docs_table_postfix}__{version_suffix}\n", + " # 
{uc_catalog_name}.{uc_schema_name}.{base_table_name}_{chunked_table_postfix}__{version_suffix}\n", + " # {uc_catalog_name}.{uc_schema_name}.{base_table_name}_{vector_index_postfix}__{version_suffix}\n", + ")\n", + "\n", + "# Alternatively, you can directly pass in the UC locations of the tables / indexes\n", + "# output_config = DataPipelineOuputConfig(\n", + "# chunked_docs_table=\"catalog.schema.docs_chunked\",\n", + "# parsed_docs_table=\"catalog.schema.parsed_docs\",\n", + "# vector_index=\"catalog.schema.docs_chunked_index\",\n", + "# vector_search_endpoint=\"REPLACE_ME\",\n", + "# )\n", + "\n", + "# Check UC locations exist\n", + "is_valid, msg = output_config.validate_catalog_and_schema()\n", + "if not is_valid:\n", + " raise Exception(msg)\n", + "\n", + "# Check Vector Search endpoint exists\n", + "is_valid, msg = output_config.validate_vector_search_endpoint()\n", + "if not is_valid:\n", + " raise Exception(msg)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "b5b380e5-1d9a-4c93-b8fe-ec23f00442a9", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "#### ✅✏️ Configure chunk size and the embedding model.\n", + "\n", + "**Chunk size and overlap** control how a larger document is turned into smaller chunks that can be processed by an embedding model. See the AI Cookbook [chunking deep dive](https://ai-cookbook.io/nbs/3-deep-dive-data-pipeline.html#chunking) for more details.\n", + "\n", + "**The embedding model** is an AI model that is used to identify the most similar documents to a given user's query. See the AI Cookbook [embedding model deep dive](https://ai-cookbook.io/nbs/3-deep-dive-data-pipeline.html#embedding-model) for more details.\n", + "\n", + "This notebook supports the following [Foundational Models](https://docs.databricks.com/en/machine-learning/foundation-models/index.html) or [External Model](https://docs.databricks.com/en/generative-ai/external-models/index.html) of type `/llm/v1/embeddings`/. 
If you want to try another model, you will need to modify the `utils/get_recursive_character_text_splitter` Notebook to add support.\n", + "- `databricks-gte-large-en` or `databricks-bge-large-en`\n", + "- Azure OpenAI or OpenAI External Model of type `text-embedding-ada-002`, `text-embedding-3-small` or `text-embedding-3-large`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "06ee684b-c7bd-4c0e-8fd8-f54416948a5a", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "from cookbook.config.data_pipeline.recursive_text_splitter import RecursiveTextSplitterChunkingConfig\n", + "\n", + "chunking_config = RecursiveTextSplitterChunkingConfig(\n", + " embedding_model_endpoint=\"databricks-gte-large-en\", # A Model Serving endpoint supporting the /llm/v1/embeddings task\n", + " chunk_size_tokens=1024,\n", + " chunk_overlap_tokens=256,\n", + ")\n", + "\n", + "# Validate the embedding endpoint & chunking config\n", + "is_valid, msg = chunking_config.validate_embedding_endpoint()\n", + "if not is_valid:\n", + " raise Exception(msg)\n", + "\n", + "is_valid, msg = chunking_config.validate_chunk_size_and_overlap()\n", + "if not is_valid:\n", + " raise Exception(msg)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 🚫✏️ Write the data pipeline configuration to a YAML\n", + "\n", + "This allows the configuration to be loaded referenced by the Agent's notebook." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "from cookbook.config.data_pipeline import DataPipelineConfig\n", + "from cookbook.config import serializable_config_to_yaml_file\n", + "\n", + "data_pipeline_config = DataPipelineConfig(\n", + " source=source_config,\n", + " output=output_config,\n", + " chunking_config=chunking_config,\n", + ")\n", + "\n", + "serializable_config_to_yaml_file(data_pipeline_config, \"./configs/data_pipeline_config.yaml\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "a28cbf99-c4ca-4adc-905a-e7ebfe015730", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "#### 🛑 If you are running your initial data pipeline, you do not need to configure anything else, you can just `Run All` the notebook cells before. You can modify these cells later to tune the quality of your data pipeline by changing the parsing logic." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "95b6971b-b00b-4f42-bbe8-cc64eea2fff8", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "## 3️⃣ ⌨️ Data pipeline code\n", + "\n", + "The code below executes the data pipeline. 
You can modify the below code as indicated to implement different parsing or chunking strategies or to extract additional metadata fields." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "c85ddc92-10c5-405c-ae78-8ded5462333e", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "#### Pipeline step 1: Load & parse documents into a Delta Table\n", + "\n", + "In this step, we'll load files from the UC Volume defined in `source_config` into the Delta Table `output_config.parsed_docs_table`. The contents of each file will become a separate row in our Delta Table.\n", + "\n", + "The path to the source document will be used as the `doc_uri`, which is displayed to your end users in the Agent Evaluation web application.\n", + "\n", + "After you test your POC with stakeholders, you can return here to change the parsing logic or extract additional metadata about the documents to help improve the quality of your retriever.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "27466460-1ee7-4fe4-8faf-da9ddff11847", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "##### ✅✏️ Customize the parsing function\n", + "\n", + "This default implementation parses PDF, HTML, and DOCX files using open source libraries. Adjust `file_parser(...)` and `ParserReturnValue` in `cookbook/data_pipeline/default_parser.py` to change the parsing logic, add support for more file types, or extract additional metadata about each document." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "d09fd38c-5b7b-47c5-aa6a-ff571ce2f83b", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "from cookbook.data_pipeline.default_parser import file_parser, ParserReturnValue\n", + "\n", + "# Print the code of file_parser function for inspection\n", + "import inspect\n", + "print(inspect.getsource(ParserReturnValue))\n", + "print(inspect.getsource(file_parser))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "61034803-4bdd-4f0b-b173-a82448ee1790", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "The cell below is debugging code to test your parsing function on a single record. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "48a3ab67-2e30-4e39-b05e-3a8ff304fd5b", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "from cookbook.data_pipeline.parse_docs import load_files_to_df\n", + "from pyspark.sql import functions as F\n", + "\n", + "\n", + "raw_files_df = load_files_to_df(\n", + " spark=spark,\n", + " source_path=source_config.volume_path,\n", + ")\n", + "\n", + "print(f\"Loaded {raw_files_df.count()} files from {source_config.volume_path}. 
Files: {source_config.list_files()}\")\n", + "\n", + "test_records_dict = raw_files_df.toPandas().to_dict(orient=\"records\")\n", + "\n", + "for record in test_records_dict:\n", + " print()\n", + " print(\"Testing parsing for file: \", record[\"path\"])\n", + " print()\n", + " test_result = file_parser(raw_doc_contents_bytes=record['content'], doc_path=record['path'], modification_time=record['modificationTime'], doc_bytes_length=record['length'])\n", + " print(test_result)\n", + " break # pause after 1 file. if you want to test more files, remove the break statement\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "9fb6db6c-faa0-4dac-be84-a832bbbb49b9", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "🚫✏️ The below cell is boilerplate code to apply the parsing function using Spark." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "165706b2-5824-42e7-a22b-3ca0edfd0a77", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "from cookbook.data_pipeline.parse_docs import (\n", + " load_files_to_df,\n", + " apply_parsing_fn,\n", + " check_parsed_df_for_errors,\n", + " check_parsed_df_for_empty_parsed_files\n", + ")\n", + "from cookbook.data_pipeline.utils.typed_dicts_to_spark_schema import typed_dicts_to_spark_schema\n", + "from cookbook.databricks_utils import get_table_url\n", + "\n", + "# Tune this parameter to optimize performance. More partitions will improve performance, but may cause out of memory errors if your cluster is too small.\n", + "NUM_PARTITIONS = 50\n", + "\n", + "# Load the UC Volume files into a Spark DataFrame\n", + "raw_files_df = load_files_to_df(\n", + " spark=spark,\n", + " source_path=source_config.volume_path,\n", + ").repartition(NUM_PARTITIONS)\n", + "\n", + "# Apply the parsing UDF to the Spark DataFrame\n", + "parsed_files_df = apply_parsing_fn(\n", + " raw_files_df=raw_files_df,\n", + " # Modify this function to change the parser, extract additional metadata, etc\n", + " parse_file_fn=file_parser,\n", + " # The schema of the resulting Delta Table will follow the schema defined in ParserReturnValue\n", + " parsed_df_schema=typed_dicts_to_spark_schema(ParserReturnValue),\n", + ")\n", + "\n", + "# Write to a Delta Table\n", + "parsed_files_df.write.mode(\"overwrite\").option(\"overwriteSchema\", \"true\").saveAsTable(\n", + " output_config.parsed_docs_table\n", + ")\n", + "\n", + "# Get resulting table\n", + "parsed_files_df = spark.table(output_config.parsed_docs_table)\n", + "parsed_files_no_errors_df = parsed_files_df.filter(\n", + " parsed_files_df.parser_status == \"SUCCESS\"\n", + ")\n", + "\n", + "# Show successfully parsed documents\n", + "print(f\"Parsed {parsed_files_df.count()} / {parsed_files_no_errors_df.count()} documents successfully. Inspect `parsed_files_no_errors_df` or visit {get_table_url(output_config.parsed_docs_table)} to see all parsed documents, including any errors.\")\n", + "display(parsed_files_no_errors_df.toPandas())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Show any parsing failures or successfully parsed files that resulted in an empty document." 
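+ , + "\n", + "If you prefer to inspect failures by hand, a filter on the `parser_status` column written by the parser gives the same view (a minimal sketch):\n", + "```python\n", + "# Rows whose parser_status is anything other than SUCCESS failed to parse.\n", + "failed_df = parsed_files_df.filter(parsed_files_df.parser_status != \"SUCCESS\")\n", + "display(failed_df.toPandas())\n", + "```"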
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# Any documents that failed to parse\n", + "is_error, msg, failed_docs_df = check_parsed_df_for_errors(parsed_files_df)\n", + "if is_error:\n", + " display(failed_docs_df.toPandas())\n", + " raise Exception(msg)\n", + " \n", + "# Any documents that returned empty parsing results\n", + "is_error, msg, empty_docs_df = check_parsed_df_for_empty_parsed_files(parsed_files_df)\n", + "if is_error:\n", + " display(empty_docs_df.toPandas())\n", + " raise Exception(msg)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "e21c84e8-7682-4a7a-86fc-7f4f990bb490", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "#### Pipeline step 2: Compute chunks of documents\n", + "\n", + "In this step, we will split our documents into smaller chunks so they can be indexed in our vector database." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "eecd460c-f287-47ce-98f1-cea78a1f3f64", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "\n", + "##### ✅✏️ Chunking logic.\n", + "\n", + "We provide a default implementation of a recursive text splitter. To create your own chunking logic, adapt the `get_recursive_character_text_splitter()` function inside `cookbook.data_pipeline.recursive_character_text_splitter.py`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "02c40228-f933-4af8-9121-ed2efa0985dd", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "from cookbook.data_pipeline.recursive_character_text_splitter import (\n", + " get_recursive_character_text_splitter,\n", + ")\n", + "\n", + "# Get the chunking function\n", + "recursive_character_text_splitter_fn = get_recursive_character_text_splitter(\n", + " model_serving_endpoint=chunking_config.embedding_model_endpoint,\n", + " chunk_size_tokens=chunking_config.chunk_size_tokens,\n", + " chunk_overlap_tokens=chunking_config.chunk_overlap_tokens,\n", + ")\n", + "\n", + "# Determine which columns to propagate from the docs table to the chunks table.\n", + "\n", + "# Get the columns from the parser except for the content\n", + "# You can modify this to adjust which fields are propagated from the docs table to the chunks table.\n", + "propagate_columns = [\n", + " field.name\n", + " for field in typed_dicts_to_spark_schema(ParserReturnValue).fields\n", + " if field.name != \"content\"\n", + "]\n", + "\n", + "# If you want to implement retrieval strategies such as presenting the entire document vs. the chunk to the LLM, include `content`, which contains the doc's full parsed text. By default this is not included because the size of `content` is quite large and can cause performance issues.\n", + "# propagate_columns = [\n", + "# field.name\n", + "# for field in typed_dicts_to_spark_schema(ParserReturnValue).fields\n", + "# ]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "b17add2c-e7f0-4903-8ae9-40ca0633a8d5", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "🚫✏️ Run the chunking function within Spark" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "0dfa90f8-c4dc-4485-8fa8-dcd4c7d40618", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "from cookbook.data_pipeline.chunk_docs import apply_chunking_fn\n", + "from cookbook.databricks_utils import get_table_url\n", + "\n", + "# Tune this parameter to optimize performance. More partitions will improve performance, but may cause out of memory errors if your cluster is too small.\n", + "NUM_PARTITIONS = 50\n", + "\n", + "# Load parsed docs\n", + "parsed_files_df = spark.table(output_config.parsed_docs_table).repartition(NUM_PARTITIONS)\n", + "\n", + "chunked_docs_df = apply_chunking_fn(\n", + " # The source documents table.\n", + " parsed_docs_df=parsed_files_df,\n", + " # The chunking function that takes a string (document) and returns a list of strings (chunks).\n", + " chunking_fn=recursive_character_text_splitter_fn,\n", + " # Choose which columns to propagate from the docs table to chunks table. `doc_uri` column is required so we can propagate the original document URL to the Agent's web app.\n", + " propagate_columns=propagate_columns,\n", + ")\n", + "\n", + "# Write to Delta Table\n", + "chunked_docs_df.write.mode(\"overwrite\").option(\n", + " \"overwriteSchema\", \"true\"\n", + ").saveAsTable(output_config.chunked_docs_table)\n", + "\n", + "# Get resulting table\n", + "chunked_docs_df = spark.table(output_config.chunked_docs_table)\n", + "\n", + "# Show number of chunks created\n", + "print(f\"Created {chunked_docs_df.count()} chunks. Inspect `chunked_docs_df` or visit {get_table_url(output_config.chunked_docs_table)} to see the results.\")\n", + "\n", + "# Enable CDC feed for VS index sync\n", + "cdc_results = spark.sql(f\"ALTER TABLE {output_config.chunked_docs_table} SET TBLPROPERTIES (delta.enableChangeDataFeed = true)\")\n", + "\n", + "# Show chunks\n", + "display(chunked_docs_df.toPandas())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "9fe923a8-89c2-4852-9cea-98074b3ce404", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "#### 🚫✏️ Pipeline step 3: Create the vector index\n", + "\n", + "In this step, we'll embed the documents to compute the vector index over the chunks and create our retriever index that will be used to retrieve the documents most relevant to the user's question. 
The embedding pipeline is handled within Databricks Vector Search using [Delta Sync](https://docs.databricks.com/en/generative-ai/create-query-vector-search.html#create-a-vector-search-index)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "d53faa42-2a65-40b0-8fc1-6c27e88df6d0", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "from cookbook.data_pipeline.build_retriever_index import build_retriever_index\n", + "from cookbook.databricks_utils import get_table_url\n", + "\n", + "is_error, msg = retriever_index_result = build_retriever_index(\n", + " # Spark requires `` to escape names with special chars, VS client does not.\n", + " chunked_docs_table_name=output_config.chunked_docs_table.replace(\"`\", \"\"),\n", + " vector_search_endpoint=output_config.vector_search_endpoint,\n", + " vector_search_index_name=output_config.vector_index,\n", + "\n", + " # Must match the embedding endpoint you used to chunk your documents\n", + " embedding_endpoint_name=chunking_config.embedding_model_endpoint,\n", + "\n", + " # Set to true to re-create the vector search endpoint when re-running the data pipeline. If set to True, syncing will not work if re-run the pipeline and change the schema of chunked_docs_table_name. Keeping this as False will allow Vector Search to avoid recomputing embeddings for any row with that has a chunk_id that was previously computed.\n", + " force_delete_index_before_create=False,\n", + ")\n", + "if is_error:\n", + " raise Exception(msg)\n", + "else:\n", + " print(\"NOTE: This cell will complete before the vector index has finished syncing/embedding your chunks & is ready for queries!\")\n", + " print(f\"View sync status here: {get_table_url(output_config.vector_index)}\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "1a1ad14b-2573-4485-8369-d417f7a548f6", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "#### 🚫✏️ Print links to view the resulting tables/index" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "0cd40431-4cd3-4cc9-b38d-5ab817c40043", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "from cookbook.databricks_utils import get_table_url\n", + "\n", + "print()\n", + "print(f\"Parsed docs table: {get_table_url(output_config.parsed_docs_table)}\\n\")\n", + "print(f\"Chunked docs table: {get_table_url(output_config.chunked_docs_table)}\\n\")\n", + "print(f\"Vector search index: {get_table_url(output_config.vector_index)}\\n\")" + ] + } + ], + "metadata": { + "application/vnd.databricks.v1+notebook": { + "dashboards": [], + "environmentMetadata": null, + "language": "python", + "notebookMetadata": { + "pythonIndentUnit": 2 + }, + "notebookName": "01_data_pipeline", + "widgets": {} + }, + "kernelspec": { + "display_name": "genai-cookbook-T2SdtsNM-py3.11", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" 
+ } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/openai_sdk_agent_app_sample_code/02_agent_setup.ipynb b/openai_sdk_agent_app_sample_code/02_agent_setup.ipynb new file mode 100644 index 0000000..2298851 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/02_agent_setup.ipynb @@ -0,0 +1,282 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "d0640741-6d84-482a-aa79-f87b04d04023", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "## 👉 START HERE: How to use this notebook\n", + "\n", + "### Step 1: Agent storage configuration\n", + "\n", + "This notebook initializes a `AgentStorageConfig` Pydantic class to define the locations where the Agent's code/config and its supporting data & metadata is stored in the Unity Catalog:\n", + "- **Unity Catalog Model:** Stores staging/production versions of the Agent's code/config\n", + "- **MLflow Experiment:** Stores every development version of the Agent's code/config, each version's associated quality/cost/latency evaluation results, and any MLflow Traces from your development & evaluation processes\n", + "- **Evaluation Set Delta Table:** Stores the Agent's evaluation set\n", + "\n", + "This notebook does the following:\n", + "1. Validates the provided locations exist.\n", + "2. Serializes this configuration to `config/agent_storage_config.yaml` so other notebooks can use it" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "7702011a-84dd-4281-bba1-ea9e2b5e551d", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "**Important note:** Throughout this notebook, we indicate which cells you:\n", + "- ✅✏️ *should* customize - these cells contain config settings to change\n", + "- 🚫✏️ *typically will not* customize - these cells contain boilerplate code required to validate / save the configuration\n", + "\n", + "*Cells that don't require customization still need to be run!*" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "f8963d6e-3123-4095-bb92-9d508c52ed41", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "### 🚫✏️ Install Python libraries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "0a145c3b-d3d9-4b95-b7f6-22e1d8e991c6", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "# %pip install -qqqq -U -r requirements.txt\n", + "# %restart_python" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 🚫✏️ Connect to Databricks\n", + "\n", + "If running locally in an IDE using Databricks Connect, connect the Spark client & configure MLflow to use Databricks Managed MLflow. If this running in a Databricks Notebook, these values are already set." 
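+ , + "\n", + "A minimal sketch of local authentication, assuming you use a personal access token via environment variables (a profile in `~/.databrickscfg` works equally well; the host, token, and cluster ID below are placeholders):\n", + "```python\n", + "import os\n", + "os.environ[\"DATABRICKS_HOST\"] = \"https://<your-workspace>.cloud.databricks.com\"\n", + "os.environ[\"DATABRICKS_TOKEN\"] = \"<your-personal-access-token>\"\n", + "os.environ[\"DATABRICKS_CLUSTER_ID\"] = \"<your-cluster-id>\"  # cluster used by Databricks Connect\n", + "```"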
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from mlflow.utils import databricks_utils as du\n", + "\n", + "if not du.is_in_databricks_notebook():\n", + " from databricks.connect import DatabricksSession\n", + " import os\n", + "\n", + " spark = DatabricksSession.builder.getOrCreate()\n", + " os.environ[\"MLFLOW_TRACKING_URI\"] = \"databricks\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "a9feb28c-c72b-49b2-bbc4-a9bd4721a7cd", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "### 🚫✏️ Get current user info to set default values" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "7824cc0a-1b29-4cf9-a974-2c5ef885979f", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "from cookbook.databricks_utils import get_current_user_info\n", + "\n", + "user_email, user_name, default_catalog = get_current_user_info(spark)\n", + "\n", + "print(f\"User email: {user_email}\")\n", + "print(f\"User name: {user_name}\")\n", + "print(f\"Default UC catalog: {default_catalog}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "4b684188-d4eb-4944-86ae-9942a68308c2", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "### ✅✏️ Configure your Agent's storage locations\n", + "\n", + "Either review & accept the default values or enter your preferred location." 
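+ , + "\n", + "For illustration only (hypothetical user and catalog), the defaults in the next cell resolve to fully qualified names like:\n", + "```python\n", + "# uc_model_name            -> main.first_last_agents.my_agent\n", + "# evaluation_set_uc_table  -> main.first_last_agents.my_agent_eval_set\n", + "# mlflow_experiment_name   -> /Users/first.last@example.com/my_agent_mlflow_experiment\n", + "```"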
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "64682c1f-7e61-430e-84c9-4fb9cad8152b", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "from cookbook.config.shared.agent_storage_location import AgentStorageConfig\n", + "from cookbook.databricks_utils import get_mlflow_experiment_url\n", + "import mlflow\n", + "\n", + "# Default values below for `AgentStorageConfig` \n", + "agent_name = \"my_agent_2\"\n", + "uc_catalog_name = f\"{default_catalog}\"\n", + "uc_schema_name = f\"{user_name}_agents\"\n", + "uc_catalog_name = f\"ep\"\n", + "uc_schema_name = f\"cookbook_local_test\"\n", + "\n", + "# Agent storage configuration\n", + "agent_storage_config = AgentStorageConfig(\n", + " uc_model_name=f\"{uc_catalog_name}.{uc_schema_name}.{agent_name}\", # UC model to store staging/production versions of the Agent's code/config\n", + " evaluation_set_uc_table=f\"{uc_catalog_name}.{uc_schema_name}.{agent_name}_eval_set\", # UC table to store the evaluation set\n", + " mlflow_experiment_name=f\"/Users/{user_email}/{agent_name}_mlflow_experiment\", # MLflow Experiment to store development versions of the Agent and their associated quality/cost/latency evaluation results + MLflow Traces\n", + ")\n", + "\n", + "# Validate the UC catalog and schema for the Agent'smodel & evaluation table\n", + "is_valid, msg = agent_storage_config.validate_catalog_and_schema()\n", + "if not is_valid:\n", + " raise Exception(msg)\n", + "\n", + "# Set the MLflow experiment, validating the path is valid\n", + "experiment_info = mlflow.set_experiment(agent_storage_config.mlflow_experiment_name)\n", + "# If running in a local IDE, set the MLflow experiment name as an environment variable\n", + "os.environ[\"MLFLOW_EXPERIMENT_NAME\"] = agent_storage_config.mlflow_experiment_name\n", + "\n", + "print(f\"View the MLflow Experiment `{agent_storage_config.mlflow_experiment_name}` at {get_mlflow_experiment_url(experiment_info.experiment_id)}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "7a49117d-f136-41fa-807d-8be60b863fa9", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "### 🚫✏️ Save the configuration for use by other notebooks" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "6dd99015-5b0d-420b-8a3e-067d84b84dc7", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "from cookbook.config import serializable_config_to_yaml_file\n", + "\n", + "serializable_config_to_yaml_file(agent_storage_config, \"./configs/agent_storage_config.yaml\")" + ] + } + ], + "metadata": { + "application/vnd.databricks.v1+notebook": { + "dashboards": [], + "environmentMetadata": null, + "language": "python", + "notebookMetadata": { + "pythonIndentUnit": 2 + }, + "notebookName": "00_shared_config", + "widgets": {} + }, + "kernelspec": { + "display_name": "genai-cookbook-T2SdtsNM-py3.11", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + 
"pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/openai_sdk_agent_app_sample_code/03_create_synthetic_eval.ipynb b/openai_sdk_agent_app_sample_code/03_create_synthetic_eval.ipynb new file mode 100644 index 0000000..afbad77 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/03_create_synthetic_eval.ipynb @@ -0,0 +1,247 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 👉 START HERE: How to use this notebook\n", + "\n", + "### Step 1: Create synthetic evaluation data\n", + "\n", + "To measure your Agent's quality, you need a diverse, representative evaluation set. This notebook turns your unstructured documents into a high-quality synthetic evaluation set so that you can start to evaluate and improve your Agent's quality before subject matter experts are available to label data.\n", + "\n", + "This notebook does the following:\n", + "1. \n", + "\n", + "THIS DOES NOT WORK FROM LOCAL IDE YET." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Important note:** Throughout this notebook, we indicate which cells you:\n", + "- ✅✏️ *should* customize - these cells contain config settings to change\n", + "- 🚫✏️ *typically will not* customize - these cells contain code that is parameterized by your configuration.\n", + "\n", + "*Cells that don't require customization still need to be run!*" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 🚫✏️ Install Python libraries" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "cbcdef70-657e-4f12-b564-90d0f5b74e42", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "# %pip install -qqqq -U -r requirements.txt\n", + "# dbutils.library.restartPython()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 🚫✏️ Connect to Databricks\n", + "\n", + "If running locally in an IDE using Databricks Connect, connect the Spark client & configure MLflow to use Databricks Managed MLflow. If this running in a Databricks Notebook, these values are already set." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from mlflow.utils import databricks_utils as du\n", + "\n", + "if not du.is_in_databricks_notebook():\n", + " from databricks.connect import DatabricksSession\n", + " import os\n", + "\n", + " spark = DatabricksSession.builder.getOrCreate()\n", + " os.environ[\"MLFLOW_TRACKING_URI\"] = \"databricks\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 🚫✏️ Load the Agent's storage locations\n", + "\n", + "This notebook writes to the evaluation set table that you specified in the [Agent setup](02_agent_setup.ipynb) notebook." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from cookbook.config.shared.agent_storage_location import AgentStorageConfig\n", + "from cookbook.databricks_utils import get_table_url\n", + "from cookbook.config import load_serializable_config_from_yaml_file\n", + "\n", + "# Load the Agent's storage configuration\n", + "agent_storage_config: AgentStorageConfig = load_serializable_config_from_yaml_file('./configs/agent_storage_config.yaml')\n", + "\n", + "# Check if the evaluation set already exists\n", + "try:\n", + " eval_dataset = spark.table(agent_storage_config.evaluation_set_uc_table)\n", + " if eval_dataset.count() > 0:\n", + " print(f\"Evaluation set {get_table_url(agent_storage_config.evaluation_set_uc_table)} already exists! By default, this notebook will append to the evaluation dataset. If you would like to overwrite the existing evaluation set, please delete the table before running this notebook.\")\n", + " else:\n", + " print(f\"Evaluation set {get_table_url(agent_storage_config.evaluation_set_uc_table)} exists, but is empty! By default, this notebook will NOT change the schema of this table - if you experience schema related errors, drop this table before running this notebook so it can be recreated with the correct schema.\")\n", + "except Exception:\n", + " print(f\"Evaluation set `{agent_storage_config.evaluation_set_uc_table}` does not exist. This notebook will create a new Delta Table at this location.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### ✅✏️ Load the source documents for synthetic evaluation data generation\n", + "\n", + "Most often, this will be the same as the document output table from the [data pipeline](01_data_pipeline.ipynb).\n", + "\n", + "Here, we provide code to load the documents table that was created in the [data pipeline](01_data_pipeline.ipynb).\n", + "\n", + "Alternatively, this can be a Spark DataFrame, Pandas DataFrame, or list of dictionaries with the following keys/columns:\n", + "- `doc_uri`: A URI pointing to the document.\n", + "- `content`: The content of the document." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from cookbook.config.data_pipeline import DataPipelineConfig\n", + "from cookbook.config import load_serializable_config_from_yaml_file\n", + "\n", + "datapipeline_config: DataPipelineConfig= load_serializable_config_from_yaml_file('./configs/data_pipeline_config.yaml')\n", + "\n", + "source_documents = spark.table(datapipeline_config.output.parsed_docs_table)\n", + "\n", + "display(source_documents.toPandas())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### ✅✏️ Run the synthetic evaluation data generation\n", + "\n", + "Optionally, you can customize the guidelines to guide the synthetic data generation. By default, guidelines are not applied - to apply the guidelines, uncomment `guidelines=guidelines` in the `generate_evals_df(...)` call. See our [documentation](https://docs.databricks.com/en/generative-ai/agent-evaluation/synthesize-evaluation-set.html) for more details." 
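+ , + "\n", + "Because generation calls an LLM for every document, it can help to sanity-check the output on a few documents before running over the full corpus. A minimal sketch, reusing the same `generate_evals_df` call as the next cell:\n", + "```python\n", + "from databricks.agents.eval import generate_evals_df\n", + "\n", + "# Hypothetical dry run: only 3 documents, to inspect the generated questions first.\n", + "preview_evals_df = generate_evals_df(docs=source_documents.limit(3), num_questions_per_doc=2)\n", + "display(preview_evals_df)\n", + "```"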
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "a7cb950a-84b1-4e1d-a7fb-5179a0aa69de", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "from databricks.agents.eval import generate_evals_df\n", + "\n", + "# NOTE: The guidelines you provide are a free-form string. The markdown string below is the suggested formatting for the set of guidelines, however you are free\n", + "# to add your sections here. Note that this will be prompt-engineering an LLM that generates the synthetic data, so you may have to iterate on these guidelines before\n", + "# you get the results you desire.\n", + "guidelines = \"\"\"\n", + "# Task Description\n", + "The Agent is a RAG chatbot that answers questions about using Spark on Databricks. The Agent has access to a corpus of Databricks documents, and its task is to answer the user's questions by retrieving the relevant docs from the corpus and synthesizing a helpful, accurate response. The corpus covers a lot of info, but the Agent is specifically designed to interact with Databricks users who have questions about Spark. So questions outside of this scope are considered irrelevant.\n", + "\n", + "# User personas\n", + "- A developer who is new to the Databricks platform\n", + "- An experienced, highly technical Data Scientist or Data Engineer\n", + "\n", + "# Example questions\n", + "- what API lets me parallelize operations over rows of a delta table?\n", + "- Which cluster settings will give me the best performance when using Spark?\n", + "\n", + "# Additional Guidelines\n", + "- Questions should be succinct, and human-like\n", + "\"\"\"\n", + "\n", + "synthesized_evals_df = generate_evals_df(\n", + " docs=source_documents,\n", + " # The number of evaluations to generate for each doc.\n", + " num_questions_per_doc=2,\n", + " # A optional set of guidelines that help guide the synthetic generation. 
This is a free-form string that will be used to prompt the generation.\n", + " # guidelines=guidelines\n", + ")\n", + "\n", + "# Write the synthetic evaluation data to the evaluation set table\n", + "spark.createDataFrame(synthesized_evals_df).write.format(\"delta\").mode(\"append\").saveAsTable(agent_storage_config.evaluation_set_uc_table)\n", + "\n", + "# Display the synthetic evaluation data\n", + "eval_set_df = spark.table(agent_storage_config.evaluation_set_uc_table)\n", + "display(eval_set_df.toPandas())" + ] + } + ], + "metadata": { + "application/vnd.databricks.v1+notebook": { + "dashboards": [], + "environmentMetadata": { + "base_environment": "", + "client": "1" + }, + "language": "python", + "notebookMetadata": { + "pythonIndentUnit": 4 + }, + "notebookName": "02_create_synthetic_eval", + "widgets": {} + }, + "kernelspec": { + "display_name": "genai-cookbook-T2SdtsNM-py3.11", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/openai_sdk_agent_app_sample_code/04_create_tools.ipynb b/openai_sdk_agent_app_sample_code/04_create_tools.ipynb new file mode 100644 index 0000000..0054d05 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/04_create_tools.ipynb @@ -0,0 +1,898 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "31661828-f9bb-4fc2-a1bd-94424a27ed52", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "## 👉 START HERE: How to use this notebook\n", + "\n", + "# Step 2: Create tools for your Agent\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "5d9f685a-fdb7-49a4-9e3a-a4a9e964d045", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "\n", + "**Important note:** Throughout this notebook, we indicate which cells' code you:\n", + "- ✅✏️ should customize - these cells contain code & config with business logic that you should edit to meet your requirements & tune quality.\n", + "- 🚫✏️ should not customize - these cells contain boilerplate code required to load/save/execute your Agent\n", + "\n", + "*Cells that don't require customization still need to be run! You CAN change these cells, but if this is the first time using this notebook, we suggest not doing so.*" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "bb4f8cc0-1797-4beb-a9f2-df21a9db79f0", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "### 🚫✏️ Install Python libraries\n", + "\n", + "You do not need to modify this cell unless you need additional Python packages in your Agent."
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "6d4030e8-ae97-4351-bebd-9651d283578f", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "# %pip install -qqqq -U -r requirements.txt\n", + "# # Restart to load the packages into the Python environment\n", + "# dbutils.library.restartPython()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 🚫✏️ Connect to Databricks\n", + "\n", + "If running locally in an IDE using Databricks Connect, connect the Spark client & configure MLflow to use Databricks Managed MLflow. If this running in a Databricks Notebook, these values are already set." + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "from mlflow.utils import databricks_utils as du\n", + "\n", + "if not du.is_in_databricks_notebook():\n", + " from databricks.connect import DatabricksSession\n", + " import os\n", + "\n", + " spark = DatabricksSession.builder.getOrCreate()\n", + " os.environ[\"MLFLOW_TRACKING_URI\"] = \"databricks\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 🚫✏️ Load the Agent's UC storage locations; set up MLflow experiment\n", + "\n", + "This notebook uses the UC model, MLflow Experiment, and Evaluation Set that you specified in the [Agent setup](02_agent_setup.ipynb) notebook." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from cookbook.config.shared.agent_storage_location import AgentStorageConfig\n", + "from cookbook.databricks_utils import get_mlflow_experiment_url\n", + "from cookbook.config import load_serializable_config_from_yaml_file\n", + "import mlflow \n", + "\n", + "# Load the Agent's storage locations\n", + "agent_storage_config: AgentStorageConfig= load_serializable_config_from_yaml_file(\"./configs/agent_storage_config.yaml\")\n", + "\n", + "# Show the Agent's storage locations\n", + "agent_storage_config.pretty_print()\n", + "\n", + "# set the MLflow experiment\n", + "experiment_info = mlflow.set_experiment(agent_storage_config.mlflow_experiment_name)\n", + "# If running in a local IDE, set the MLflow experiment name as an environment variable\n", + "os.environ[\"MLFLOW_EXPERIMENT_NAME\"] = agent_storage_config.mlflow_experiment_name\n", + "\n", + "print(f\"View the MLflow Experiment `{agent_storage_config.mlflow_experiment_name}` at {get_mlflow_experiment_url(experiment_info.experiment_id)}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# create tools\n", + "\n", + "- we will store all tools in the `user_tools` folder\n", + "- first, create a local function & test it with pytest\n", + "- then, deploy it as a UC tool & test it with pytest\n", + "- then, add the tool to the Agent " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "always reload the tool's code" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 3" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## lets do an example of a simple, but fake tool that translates old to new SKUs." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "1, create the python function that will become your UC function. 
you need to annotate the function with docstrings & type hints - these are used to create the tool's metadata in UC." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%writefile tools/sample_tool.py\n", + "\n", + "def sku_sample_translator(old_sku: str) -> str:\n", + " \"\"\"\n", + " Translates a pre-2024 SKU formatted as \"OLD-XXX-YYYY\" to the new SKU format \"NEW-YYYY-XXX\".\n", + "\n", + " Args:\n", + " old_sku (str): The old SKU in the format \"OLD-XXX-YYYY\".\n", + "\n", + " Returns:\n", + " str: The new SKU in the format \"NEW-YYYY-XXX\".\n", + "\n", + " Raises:\n", + " ValueError: If the SKU format is invalid, providing specific error details.\n", + " \"\"\"\n", + " import re\n", + "\n", + " if not isinstance(old_sku, str):\n", + " raise ValueError(\"SKU must be a string\")\n", + "\n", + " # Normalize input by removing extra whitespace and converting to uppercase\n", + " old_sku = old_sku.strip().upper()\n", + "\n", + " # Define the regex pattern for the old SKU format\n", + " pattern = r\"^OLD-([A-Z]{3})-(\\d{4})$\"\n", + "\n", + " # Match the old SKU against the pattern\n", + " match = re.match(pattern, old_sku)\n", + " if not match:\n", + " if not old_sku.startswith(\"OLD-\"):\n", + " raise ValueError(\"SKU must start with 'OLD-'\")\n", + " if not re.match(r\"^OLD-[A-Z]{3}-\\d{4}$\", old_sku):\n", + " raise ValueError(\n", + " \"SKU format must be 'OLD-XXX-YYYY' where X is a letter and Y is a digit\"\n", + " )\n", + " raise ValueError(\"Invalid SKU format\")\n", + "\n", + " # Extract the letter code and numeric part\n", + " letter_code, numeric_part = match.groups()\n", + "\n", + " # Additional validation for numeric part\n", + " if not (1 <= int(numeric_part) <= 9999):\n", + " raise ValueError(\"Numeric part must be between 0001 and 9999\")\n", + "\n", + " # Construct the new SKU\n", + " new_sku = f\"NEW-{numeric_part}-{letter_code}\"\n", + " return new_sku\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's import the tool and test it locally" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from tools.sample_tool import sku_sample_translator\n", + "\n", + "sku_sample_translator(\"OLD-XXX-1234\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "now, lets write some pyTest unit tests for the tool - these are just samples, you will need to write your own" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%writefile tools/test_sample_tool.py\n", + "import pytest\n", + "from tools.sample_tool import sku_sample_translator\n", + "\n", + "\n", + "\n", + "def test_valid_sku_translation():\n", + " \"\"\"Test successful SKU translation with valid input.\"\"\"\n", + " assert sku_sample_translator(\"OLD-ABC-1234\") == \"NEW-1234-ABC\"\n", + " assert sku_sample_translator(\"OLD-XYZ-0001\") == \"NEW-0001-XYZ\"\n", + " assert sku_sample_translator(\"old-def-5678\") == \"NEW-5678-DEF\" # Test case insensitivity\n", + "\n", + "\n", + "def test_whitespace_handling():\n", + " \"\"\"Test that the function handles extra whitespace correctly.\"\"\"\n", + " assert sku_sample_translator(\" OLD-ABC-1234 \") == \"NEW-1234-ABC\"\n", + " assert sku_sample_translator(\"\\tOLD-ABC-1234\\n\") == \"NEW-1234-ABC\"\n", + "\n", + "\n", + "def test_invalid_input_type():\n", + " \"\"\"Test that non-string inputs raise ValueError.\"\"\"\n", + " with 
pytest.raises(ValueError, match=\"SKU must be a string\"):\n", + "        sku_sample_translator(123)\n", + "    with pytest.raises(ValueError, match=\"SKU must be a string\"):\n", + "        sku_sample_translator(None)\n", + "\n", + "\n", + "def test_invalid_prefix():\n", + "    \"\"\"Test that SKUs not starting with 'OLD-' raise ValueError.\"\"\"\n", + "    with pytest.raises(ValueError, match=\"SKU must start with 'OLD-'\"):\n", + "        sku_sample_translator(\"NEW-ABC-1234\")\n", + "    with pytest.raises(ValueError, match=\"SKU must start with 'OLD-'\"):\n", + "        sku_sample_translator(\"XXX-ABC-1234\")\n", + "\n", + "\n", + "def test_invalid_format():\n", + "    \"\"\"Test various invalid SKU formats.\"\"\"\n", + "    invalid_skus = [\n", + "        \"OLD-AB-1234\",  # Too few letters\n", + "        \"OLD-ABCD-1234\",  # Too many letters\n", + "        \"OLD-123-1234\",  # Numbers instead of letters\n", + "        \"OLD-ABC-123\",  # Too few digits\n", + "        \"OLD-ABC-12345\",  # Too many digits\n", + "        \"OLD-ABC-XXXX\",  # Letters instead of numbers\n", + "        \"OLD-A1C-1234\",  # Mixed letters and numbers in middle\n", + "    ]\n", + "\n", + "    for sku in invalid_skus:\n", + "        with pytest.raises(\n", + "            ValueError,\n", + "            match=\"SKU format must be 'OLD-XXX-YYYY' where X is a letter and Y is a digit\",\n", + "        ):\n", + "            sku_sample_translator(sku)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's run the tests." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pytest\n", + "\n", + "# Run tests from test_sample_tool.py\n", + "pytest.main([\"-v\", \"tools/test_sample_tool.py\"])\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's deploy the tool to Unity Catalog." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from unitycatalog.ai.core.databricks import DatabricksFunctionClient\n", + "from tools.sample_tool import sku_sample_translator\n", + "\n", + "client = DatabricksFunctionClient()\n", + "CATALOG = \"ep\"  # Change me!\n", + "SCHEMA = \"cookbook_local_test\"  # Change me if you want\n", + "\n", + "# this will deploy the tool to UC, automatically setting the metadata in UC based on the tool's docstring & type hints\n", + "tool_uc_info = client.create_python_function(func=sku_sample_translator, catalog=CATALOG, schema=SCHEMA, replace=True)\n", + "\n", + "# the tool will deploy to a function in UC called `{catalog}.{schema}.{func}` where {func} is the name of the function\n", + "# Print the deployed Unity Catalog function name\n", + "print(f\"Deployed Unity Catalog function name: {tool_uc_info.full_name}\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, wrap it into a UCTool that will be used by our Agent. A UCTool is just a Pydantic base model, serializable to YAML, that loads the tool's metadata from UC and wraps it in a callable object."
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "from cookbook.tools.uc_tool import UCTool\n", + "\n", + "# wrap the tool into a UCTool which can be passed to our Agent\n", + "translate_sku_tool = UCTool(uc_function_name=tool_uc_info.full_name)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's test the UC tool - the UCTool is a directly callable wrapper around the UC function, so it can be used just like a local function, but the output will be put into a dictionary with either the output in a 'value' key or an 'error' key if an error is raised.\n", + "\n", + "when an error happens, the UC tool will also return an instruction prompt to show the agent how to think about handling the error. this can be changed via the `error_prompt` parameter in the UCTool..\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# successful call\n", + "translate_sku_tool(old_sku=\"OLD-XXX-1234\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# unsuccessful call\n", + "translate_sku_tool(old_sku=\"OxxLD-XXX-1234\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "now, let's convert our pytests to work with the UC tool. this requires a bit of transformation to the test code to account for the fact that the output is in a dictionary & exceptions are not raised directly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%writefile tools/test_sample_tool_uc.py\n", + "import pytest\n", + "from cookbook.tools.uc_tool import UCTool\n", + "\n", + "# Load the function from the UCTool versus locally\n", + "@pytest.fixture\n", + "def uc_tool():\n", + " \"\"\"Fixture to translate a UC tool into a local function.\"\"\"\n", + " UC_FUNCTION_NAME = \"ep.cookbook_local_test.sku_sample_translator\"\n", + " loaded_tool = UCTool(uc_function_name=UC_FUNCTION_NAME)\n", + " return loaded_tool\n", + "\n", + "\n", + "# Note: The value will be post processed into the `value` key, so we must check the returned value there.\n", + "def test_valid_sku_translation(uc_tool):\n", + " \"\"\"Test successful SKU translation with valid input.\"\"\"\n", + " assert uc_tool(old_sku=\"OLD-ABC-1234\")[\"value\"] == \"NEW-1234-ABC\"\n", + " assert uc_tool(old_sku=\"OLD-XYZ-0001\")[\"value\"] == \"NEW-0001-XYZ\"\n", + " assert (\n", + " uc_tool(old_sku=\"old-def-5678\")[\"value\"] == \"NEW-5678-DEF\"\n", + " ) # Test case insensitivity\n", + "\n", + "\n", + "# Note: The value will be post processed into the `value` key, so we must check the returned value there.\n", + "def test_whitespace_handling(uc_tool):\n", + " \"\"\"Test that the function handles extra whitespace correctly.\"\"\"\n", + " assert uc_tool(old_sku=\" OLD-ABC-1234 \")[\"value\"] == \"NEW-1234-ABC\"\n", + " assert uc_tool(old_sku=\"\\tOLD-ABC-1234\\n\")[\"value\"] == \"NEW-1234-ABC\"\n", + "\n", + "\n", + "# Note: the input validation happens BEFORE the function is called by Spark, so we will never get these exceptions from the function.\n", + "# Instead, we will get invalid parameters errors from Spark.\n", + "def test_invalid_input_type(uc_tool):\n", + " \"\"\"Test that non-string inputs raise ValueError.\"\"\"\n", + " assert (\n", + " uc_tool(old_sku=123)[\"error\"][\"error_message\"]\n", + " == \"\"\"Invalid parameters provided: {'old_sku': \"Parameter old_sku 
should be of type STRING (corresponding python type ), but got \"}.\"\"\"\n", + " )\n", + " assert (\n", + " uc_tool(old_sku=None)[\"error\"][\"error_message\"]\n", + " == \"\"\"Invalid parameters provided: {'old_sku': \"Parameter old_sku should be of type STRING (corresponding python type ), but got \"}.\"\"\"\n", + " )\n", + "\n", + "\n", + "# Note: The errors will be post processed into the `error_message` key inside the `error` top level key, so we must check for exceptions there.\n", + "def test_invalid_prefix(uc_tool):\n", + " \"\"\"Test that SKUs not starting with 'OLD-' raise ValueError.\"\"\"\n", + " assert (\n", + " uc_tool(old_sku=\"NEW-ABC-1234\")[\"error\"][\"error_message\"]\n", + " == \"ValueError: SKU must start with 'OLD-'\"\n", + " )\n", + " assert (\n", + " uc_tool(old_sku=\"XXX-ABC-1234\")[\"error\"][\"error_message\"]\n", + " == \"ValueError: SKU must start with 'OLD-'\"\n", + " )\n", + "\n", + "\n", + "# Note: The errors will be post processed into the `error_message` key inside the `error` top level key, so we must check for exceptions there.\n", + "def test_invalid_format(uc_tool):\n", + " \"\"\"Test various invalid SKU formats.\"\"\"\n", + " invalid_skus = [\n", + " \"OLD-AB-1234\", # Too few letters\n", + " \"OLD-ABCD-1234\", # Too many letters\n", + " \"OLD-123-1234\", # Numbers instead of letters\n", + " \"OLD-ABC-123\", # Too few digits\n", + " \"OLD-ABC-12345\", # Too many digits\n", + " \"OLD-ABC-XXXX\", # Letters instead of numbers\n", + " \"OLD-A1C-1234\", # Mixed letters and numbers in middle\n", + " ]\n", + "\n", + " expected_error = \"ValueError: SKU format must be 'OLD-XXX-YYYY' where X is a letter and Y is a digit\"\n", + " for sku in invalid_skus:\n", + " assert uc_tool(old_sku=sku)[\"error\"][\"error_message\"] == expected_error\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pytest\n", + "\n", + "# Run tests from test_sku_translator.py\n", + "pytest.main([\"-v\", \"tools/test_sample_tool_uc.py\"])\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Now, here's another example of a tool that executes python code." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%writefile tools/code_exec.py\n", + "def python_exec(code: str) -> str:\n", + " \"\"\"\n", + " Executes Python code in the sandboxed environment and returns its stdout. The runtime is stateless and you can not read output of the previous tool executions. i.e. No such variables \"rows\", \"observation\" defined. Calling another tool inside a Python code is NOT allowed.\n", + " Use only standard python libraries and these python libraries: bleach, chardet, charset-normalizer, defusedxml, googleapis-common-protos, grpcio, grpcio-status, jmespath, joblib, numpy, packaging, pandas, patsy, protobuf, pyarrow, pyparsing, python-dateutil, pytz, scikit-learn, scipy, setuptools, six, threadpoolctl, webencodings, user-agents, cryptography.\n", + "\n", + " Args:\n", + " code (str): Python code to execute. 
Remember to print the final result to stdout.\n", + "\n", + " Returns:\n", + " str: The output of the executed code.\n", + " \"\"\"\n", + " import sys\n", + " from io import StringIO\n", + "\n", + " sys_stdout = sys.stdout\n", + " redirected_output = StringIO()\n", + " sys.stdout = redirected_output\n", + " exec(code)\n", + " sys.stdout = sys_stdout\n", + " return redirected_output.getvalue()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from tools.code_exec import python_exec\n", + "\n", + "python_exec(\"print('hello')\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Test it locally" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%writefile tools/test_code_exec.py\n", + "\n", + "import pytest\n", + "from .code_exec import python_exec\n", + "\n", + "\n", + "def test_basic_arithmetic():\n", + " code = \"\"\"result = 2 + 2\\nprint(result)\"\"\"\n", + " assert python_exec(code).strip() == \"4\"\n", + "\n", + "\n", + "def test_multiple_lines():\n", + " code = \"x = 5\\n\" \"y = 3\\n\" \"result = x * y\\n\" \"print(result)\"\n", + " assert python_exec(code).strip() == \"15\"\n", + "\n", + "\n", + "def test_multiple_prints():\n", + " code = \"\"\"print('first')\\nprint('second')\\nprint('third')\\n\"\"\"\n", + " expected = \"first\\nsecond\\nthird\\n\"\n", + " assert python_exec(code) == expected\n", + "\n", + "\n", + "def test_using_pandas():\n", + " code = (\n", + " \"import pandas as pd\\n\"\n", + " \"data = {'col1': [1, 2], 'col2': [3, 4]}\\n\"\n", + " \"df = pd.DataFrame(data)\\n\"\n", + " \"print(df.shape)\"\n", + " )\n", + " assert python_exec(code).strip() == \"(2, 2)\"\n", + "\n", + "\n", + "def test_using_numpy():\n", + " code = \"import numpy as np\\n\" \"arr = np.array([1, 2, 3])\\n\" \"print(arr.mean())\"\n", + " assert python_exec(code).strip() == \"2.0\"\n", + "\n", + "\n", + "def test_syntax_error():\n", + " code = \"if True\\n\" \" print('invalid syntax')\"\n", + " with pytest.raises(SyntaxError):\n", + " python_exec(code)\n", + "\n", + "\n", + "def test_runtime_error():\n", + " code = \"x = 1 / 0\\n\" \"print(x)\"\n", + " with pytest.raises(ZeroDivisionError):\n", + " python_exec(code)\n", + "\n", + "\n", + "def test_undefined_variable():\n", + " code = \"print(undefined_variable)\"\n", + " with pytest.raises(NameError):\n", + " python_exec(code)\n", + "\n", + "\n", + "def test_multiline_string_manipulation():\n", + " code = \"text = '''\\n\" \"Hello\\n\" \"World\\n\" \"'''\\n\" \"print(text.strip())\"\n", + " expected = \"Hello\\nWorld\"\n", + " assert python_exec(code).strip() == expected\n", + "\n", + "# Will not fail locally, but will fail in UC.\n", + "# def test_unauthorized_flask():\n", + "# code = \"from flask import Flask\\n\" \"app = Flask(__name__)\\n\" \"print(app)\"\n", + "# with pytest.raises(ImportError):\n", + "# python_exec(code)\n", + "\n", + "\n", + "def test_no_print_statement():\n", + " code = \"x = 42\\n\" \"y = x * 2\"\n", + " assert python_exec(code) == \"\"\n", + "\n", + "\n", + "def test_calculation_without_print():\n", + " code = \"result = sum([1, 2, 3, 4, 5])\\n\" \"squared = [x**2 for x in range(5)]\"\n", + " assert python_exec(code) == \"\"\n", + "\n", + "\n", + "def test_function_definition_without_call():\n", + " code = \"def add(a, b):\\n\" \" return a + b\\n\" \"result = add(3, 4)\"\n", + " assert python_exec(code) == \"\"\n", + "\n", + "\n", + "def 
test_class_definition_without_instantiation():\n", + " code = (\n", + " \"class Calculator:\\n\"\n", + " \" def add(self, a, b):\\n\"\n", + " \" return a + b\\n\"\n", + " \"calc = Calculator()\"\n", + " )\n", + " assert python_exec(code) == \"\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pytest\n", + "\n", + "# Run tests from test_sku_translator.py\n", + "pytest.main([\"-v\", \"tools/test_code_exec.py\"])\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Deploy to UC" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from unitycatalog.ai.core.databricks import DatabricksFunctionClient\n", + "from tools.code_exec import python_exec\n", + "from cookbook.tools.uc_tool import UCTool\n", + "\n", + "client = DatabricksFunctionClient()\n", + "CATALOG = \"ep\" # Change me!\n", + "SCHEMA = \"cookbook_local_test\" # Change me if you want\n", + "\n", + "# this will deploy the tool to UC, automatically setting the metadata in UC based on the tool's docstring & typing hints\n", + "python_exec_tool_uc_info = client.create_python_function(func=python_exec, catalog=CATALOG, schema=SCHEMA, replace=True)\n", + "\n", + "# the tool will deploy to a function in UC called `{catalog}.{schema}.{func}` where {func} is the name of the function\n", + "# Print the deployed Unity Catalog function name\n", + "print(f\"Deployed Unity Catalog function name: {python_exec_tool_uc_info.full_name}\")\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Test as UC Tool for the Agent" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from cookbook.tools.uc_tool import UCTool\n", + "\n", + "\n", + "# wrap the tool into a UCTool which can be passed to our Agent\n", + "python_exec_tool = UCTool(uc_function_name=python_exec_tool_uc_info.full_name)\n", + "\n", + "python_exec_tool(code=\"print('hello')\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "New tests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%writefile tools/test_code_exec_as_uc_tool.py\n", + "\n", + "import pytest\n", + "from cookbook.tools.uc_tool import UCTool\n", + "\n", + "CATALOG = \"ep\"\n", + "SCHEMA = \"cookbook_local_test\"\n", + "\n", + "\n", + "@pytest.fixture\n", + "def python_exec():\n", + " \"\"\"Fixture to provide the python_exec function from UCTool.\"\"\"\n", + " python_exec_tool = UCTool(uc_function_name=f\"{CATALOG}.{SCHEMA}.python_exec\")\n", + " return python_exec_tool\n", + "\n", + "\n", + "def test_basic_arithmetic(python_exec):\n", + " code = \"\"\"result = 2 + 2\\nprint(result)\"\"\"\n", + " assert python_exec(code=code)[\"value\"].strip() == \"4\"\n", + "\n", + "\n", + "def test_multiple_lines(python_exec):\n", + " code = \"x = 5\\n\" \"y = 3\\n\" \"result = x * y\\n\" \"print(result)\"\n", + " assert python_exec(code=code)[\"value\"].strip() == \"15\"\n", + "\n", + "\n", + "def test_multiple_prints(python_exec):\n", + " code = \"\"\"print('first')\\nprint('second')\\nprint('third')\\n\"\"\"\n", + " expected = \"first\\nsecond\\nthird\\n\"\n", + " assert python_exec(code=code)[\"value\"] == expected\n", + "\n", + "\n", + "def test_using_pandas(python_exec):\n", + " code = (\n", + " \"import pandas as pd\\n\"\n", + " \"data = {'col1': [1, 2], 'col2': [3, 4]}\\n\"\n", 
+ " \"df = pd.DataFrame(data)\\n\"\n", + " \"print(df.shape)\"\n", + " )\n", + " assert python_exec(code=code)[\"value\"].strip() == \"(2, 2)\"\n", + "\n", + "\n", + "def test_using_numpy(python_exec):\n", + " code = \"import numpy as np\\n\" \"arr = np.array([1, 2, 3])\\n\" \"print(arr.mean())\"\n", + " assert python_exec(code=code)[\"value\"].strip() == \"2.0\"\n", + "\n", + "\n", + "def test_syntax_error(python_exec):\n", + " code = \"if True\\n\" \" print('invalid syntax')\"\n", + " result = python_exec(code=code)\n", + " assert \"Syntax error at or near 'invalid'.\" in result[\"error\"][\"error_message\"]\n", + "\n", + "\n", + "def test_runtime_error(python_exec):\n", + " code = \"x = 1 / 0\\n\" \"print(x)\"\n", + " result = python_exec(code=code)\n", + " assert \"ZeroDivisionError\" in result[\"error\"][\"error_message\"]\n", + "\n", + "\n", + "def test_undefined_variable(python_exec):\n", + " code = \"print(undefined_variable)\"\n", + " result = python_exec(code=code)\n", + " assert \"NameError\" in result[\"error\"][\"error_message\"]\n", + "\n", + "\n", + "def test_multiline_string_manipulation(python_exec):\n", + " code = \"text = '''\\n\" \"Hello\\n\" \"World\\n\" \"'''\\n\" \"print(text.strip())\"\n", + " expected = \"Hello\\nWorld\"\n", + " assert python_exec(code=code)[\"value\"].strip() == expected\n", + "\n", + "\n", + "def test_unauthorized_flask(python_exec):\n", + " code = \"from flask import Flask\\n\" \"app = Flask(__name__)\\n\" \"print(app)\"\n", + " result = python_exec(code=code)\n", + " assert (\n", + " \"ModuleNotFoundError: No module named 'flask'\"\n", + " in result[\"error\"][\"error_message\"]\n", + " )\n", + "\n", + "\n", + "def test_no_print_statement(python_exec):\n", + " code = \"x = 42\\n\" \"y = x * 2\"\n", + " assert python_exec(code=code)[\"value\"] == \"\"\n", + "\n", + "\n", + "def test_calculation_without_print(python_exec):\n", + " code = \"result = sum([1, 2, 3, 4, 5])\\n\" \"squared = [x**2 for x in range(5)]\"\n", + " assert python_exec(code=code)[\"value\"] == \"\"\n", + "\n", + "\n", + "def test_function_definition_without_call(python_exec):\n", + " code = \"def add(a, b):\\n\" \" return a + b\\n\" \"result = add(3, 4)\"\n", + " assert python_exec(code=code)[\"value\"] == \"\"\n", + "\n", + "\n", + "def test_class_definition_without_instantiation(python_exec):\n", + " code = (\n", + " \"class Calculator:\\n\"\n", + " \" def add(self, a, b):\\n\"\n", + " \" return a + b\\n\"\n", + " \"calc = Calculator()\"\n", + " )\n", + " assert python_exec(code=code)[\"value\"] == \"\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pytest\n", + "\n", + "# Run tests from test_sku_translator.py\n", + "pytest.main([\"-v\", \"tools/test_code_exec_as_uc_tool.py\"])\n", + "\n" + ] + } + ], + "metadata": { + "application/vnd.databricks.v1+notebook": { + "dashboards": [], + "environmentMetadata": null, + "language": "python", + "notebookMetadata": {}, + "notebookName": "02_agent__function_calling_mlflow_sdk", + "widgets": {} + }, + "kernelspec": { + "display_name": "genai-cookbook-T2SdtsNM-py3.11", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git 
a/openai_sdk_agent_app_sample_code/05_tool_calling_agent.ipynb b/openai_sdk_agent_app_sample_code/05_tool_calling_agent.ipynb new file mode 100644 index 0000000..0d60c00 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/05_tool_calling_agent.ipynb @@ -0,0 +1,618 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "31661828-f9bb-4fc2-a1bd-94424a27ed52", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "## 👉 START HERE: How to use this notebook\n", + "\n", + "# Step 3: Build, evaluate, & deploy your Agent\n", + "\n", + "Use this notebook to iterate on the code and configuration of your Agent.\n", + "\n", + "By the end of this notebook, you will have 1+ registered versions of your Agent, each coupled with a detailed quality evaluation.\n", + "\n", + "Optionally, you can deploy a version of your Agent that you can interact with in the [Mosiac AI Playground](https://docs.databricks.com/en/large-language-models/ai-playground.html) and let your business stakeholders who don't have Databricks accounts interact with it & provide feedback in the [Review App](https://docs.databricks.com/en/generative-ai/agent-evaluation/human-evaluation.html#review-app-ui).\n", + "\n", + "\n", + "For each version of your agent, you will have an MLflow run inside your MLflow experiment that contains:\n", + "- Your Agent's code & config\n", + "- Evaluation metrics for cost, quality, and latency" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "5d9f685a-fdb7-49a4-9e3a-a4a9e964d045", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "\n", + "**Important note:** Throughout this notebook, we indicate which cell's code you:\n", + "- ✅✏️ should customize - these cells contain code & config with business logic that you should edit to meet your requirements & tune quality.\n", + "- 🚫✏️ should not customize - these cells contain boilerplate code required to load/save/execute your Agent\n", + "\n", + "*Cells that don't require customization still need to be run! You CAN change these cells, but if this is the first time using this notebook, we suggest not doing so.*" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "bb4f8cc0-1797-4beb-a9f2-df21a9db79f0", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "### 🚫✏️ Install Python libraries\n", + "\n", + "You do not need to modify this cell unless you need additional Python packages in your Agent." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "6d4030e8-ae97-4351-bebd-9651d283578f", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "# %pip install -qqqq -U -r requirements.txt\n", + "# # Restart to load the packages into the Python environment\n", + "# dbutils.library.restartPython()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 🚫✏️ Connect to Databricks\n", + "\n", + "If running locally in an IDE using Databricks Connect, connect the Spark client & configure MLflow to use Databricks Managed MLflow. 
If this is running in a Databricks Notebook, these values are already set." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from mlflow.utils import databricks_utils as du\n", + "\n", + "if not du.is_in_databricks_notebook():\n", + "    from databricks.connect import DatabricksSession\n", + "    import os\n", + "\n", + "    spark = DatabricksSession.builder.getOrCreate()\n", + "    os.environ[\"MLFLOW_TRACKING_URI\"] = \"databricks\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 🚫✏️ Load the Agent's UC storage locations; set up MLflow experiment\n", + "\n", + "This notebook uses the UC model, MLflow Experiment, and Evaluation Set that you specified in the [Agent setup](02_agent_setup.ipynb) notebook." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from cookbook.config.shared.agent_storage_location import AgentStorageConfig\n", + "from cookbook.databricks_utils import get_mlflow_experiment_url\n", + "from cookbook.config import load_serializable_config_from_yaml_file\n", + "import mlflow\n", + "import os\n", + "\n", + "# Load the Agent's storage locations\n", + "agent_storage_config: AgentStorageConfig = load_serializable_config_from_yaml_file(\"./configs/agent_storage_config.yaml\")\n", + "\n", + "# Show the Agent's storage locations\n", + "agent_storage_config.pretty_print()\n", + "\n", + "# set the MLflow experiment\n", + "experiment_info = mlflow.set_experiment(agent_storage_config.mlflow_experiment_name)\n", + "# If running in a local IDE, set the MLflow experiment name as an environment variable\n", + "os.environ[\"MLFLOW_EXPERIMENT_NAME\"] = agent_storage_config.mlflow_experiment_name\n", + "\n", + "print(f\"View the MLflow Experiment `{agent_storage_config.mlflow_experiment_name}` at {get_mlflow_experiment_url(experiment_info.experiment_id)}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 🚫✏️ Helper method to log the Agent's code & config to MLflow\n", + "\n", + "Before we start, let's define a helper method to log the Agent's code & config to MLflow & the Unity Catalog. It is used in evaluation & for deploying to Agent Evaluation's [Review App](https://docs.databricks.com/en/generative-ai/agent-evaluation/human-evaluation.html#review-app-ui) (a chat UI for your stakeholders to test this agent) and, later, for deploying the Agent to production."
+ ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "import mlflow\n", + "from mlflow.types.llm import CHAT_MODEL_INPUT_SCHEMA\n", + "from mlflow.models.rag_signatures import StringResponse\n", + "from cookbook.agents.utils.signatures import STRING_RESPONSE_WITH_MESSAGES\n", + "from mlflow.models.signature import ModelSignature\n", + "from cookbook.agents.function_calling_agent import FunctionCallingAgent\n", + "from cookbook.agents.function_calling_agent import FunctionCallingAgentConfig\n", + "\n", + "# This helper will log the Agent's code & config to an MLflow run and return the logged model's URI\n", + "# If run from inside a mlflow.start_run() block, it will log to that run, otherwise it will log to a new run.\n", + "# This logged Agent is ready for deployment, so if you are happy with your evaluation, it is ready to deploy!\n", + "def log_function_calling_agent_to_mlflow(agent_config: FunctionCallingAgentConfig):\n", + "    # Get the agent's code path from the imported Agent class\n", + "    agent_code_path = f\"{os.getcwd()}/{FunctionCallingAgent.__module__.replace('.', '/')}.py\"\n", + "\n", + "    # Get the pip requirements from the requirements.txt file\n", + "    with open(\"requirements.txt\", \"r\") as file:\n", + "        pip_requirements = [line.strip() for line in file.readlines()] + [\"pyspark\"]  # manually add pyspark\n", + "\n", + "    logged_agent_info = mlflow.pyfunc.log_model(\n", + "        artifact_path=\"agent\",\n", + "        python_model=agent_code_path,\n", + "        input_example=agent_config.input_example,\n", + "        model_config=agent_config.model_dump(),\n", + "        resources=agent_config.get_resource_dependencies(),  # This allows the agents.deploy() command to securely provision credentials for the Agent's Databricks resources e.g., vector index, model serving endpoints, etc\n", + "        signature=ModelSignature(\n", + "            inputs=CHAT_MODEL_INPUT_SCHEMA,\n", + "            # outputs=STRING_RESPONSE_WITH_MESSAGES #TODO: replace with MLflow signature\n", + "            outputs=StringResponse()\n", + "        ),\n", + "        code_paths=[os.path.join(os.getcwd(), \"cookbook\")],\n", + "        pip_requirements=pip_requirements,\n", + "    )\n", + "\n", + "    return logged_agent_info" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "9933d05f-29fa-452e-abdc-2a02328fbe22", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "\n", + "## 1️⃣ Iterate on the Agent's code & config to improve quality\n", + "\n", + "The below cells are used to execute your inner dev loop to improve the Agent's quality.\n", + "\n", + "We suggest the following process:\n", + "1. Vibe check the Agent for 5 - 10 queries to verify it works\n", + "2. Make any necessary changes to the code/config\n", + "3. Use Agent Evaluation to evaluate the Agent using your evaluation set, which will provide a quality assessment & identify the root causes of any quality issues\n", + "4. Based on that evaluation, make & test changes to the code/config to improve quality\n", + "5. 🔁 Repeat steps 3 and 4 until you are satisfied with the Agent's quality\n", + "6. Deploy the Agent to Agent Evaluation's [Review App](https://docs.databricks.com/en/generative-ai/agent-evaluation/human-evaluation.html#review-app-ui) for pre-production testing\n", + "7. Use the following notebooks to review that feedback (optionally adding new records to your evaluation set) & identify any further quality issues\n", + "8. 
🔁 Repeat steps 3 and 4 to fix any issues identified in step 7\n", + "9. Deploy the Agent to a production-ready REST API endpoint (using the same cells in this notebook as step 6)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "# Import Cookbook Agent configurations, which are Pydantic models\n", + "from cookbook.config import serializable_config_to_yaml_file\n", + "from cookbook.config.agents.function_calling_agent import (\n", + " FunctionCallingAgentConfig,\n", + ")\n", + "from cookbook.config.data_pipeline import (\n", + " DataPipelineConfig,\n", + ")\n", + "from cookbook.config.shared.llm import LLMConfig, LLMParametersConfig\n", + "from cookbook.config import load_serializable_config_from_yaml_file\n", + "from cookbook.tools.vector_search import (\n", + " VectorSearchRetrieverTool,\n", + " VectorSearchSchema,\n", + ")\n", + "import json\n", + "from cookbook.tools.uc_tool import UCTool\n", + "\n", + "\n", + "########################\n", + "# #### 🚫✏️ Load the Vector Index Unity Cataloglocation from the data pipeline configuration\n", + "# Usage:\n", + "# - If you used `01_data_pipeline` to create your Vector Index, run this cell.\n", + "# - If your Vector Index was created elsewhere, comment out this logic and set the UC location in the Retriever config.\n", + "########################\n", + "\n", + "data_pipeline_config: DataPipelineConfig = load_serializable_config_from_yaml_file(\n", + " \"./configs/data_pipeline_config.yaml\"\n", + ")\n", + "\n", + "########################\n", + "# #### ✅✏️ Retriever tool that connects to the Vector Search index\n", + "########################\n", + "\n", + "retriever_tool = VectorSearchRetrieverTool(\n", + " name=\"search_product_docs\",\n", + " description=\"Use this tool to search for product documentation.\",\n", + " vector_search_index=\"ep.cookbook_local_test.product_docs_docs_chunked_index__v1\",\n", + " vector_search_schema=VectorSearchSchema(\n", + " # These columns are the default values used in the `01_data_pipeline` notebook\n", + " # If you used a different column names in that notebook OR you are using a pre-built vector index, update the column names here.\n", + " chunk_text=\"content_chunked\", # Contains the text of each document chunk\n", + " document_uri=\"doc_uri\", # The document URI of the chunk e.g., \"/Volumes/catalog/schema/volume/file.pdf\" - displayed as the document ID in the Review App\n", + " # additional_metadata_columns=[], # Additional columns to return from the vector database and present to the LLM\n", + " ),\n", + " # Optional parameters, see VectorSearchRetrieverTool.__doc__ for details. 
The default values are shown below.\n", + " # doc_similarity_threshold=0.0,\n", + " # vector_search_parameters=VectorSearchParameters(\n", + " # num_results=5,\n", + " # query_type=\"ann\"\n", + " # ),\n", + " # Adding columns here will allow the Agent's LLM to dynamically apply filters based on the user's query.\n", + " # filterable_columns=[]\n", + ")\n", + "\n", + "########################\n", + "# #### ✅✏️ Add Unity Catalog tools to the Agent\n", + "########################\n", + "\n", + "translate_sku_tool = UCTool(uc_function_name=\"ep.cookbook_local_test.sku_sample_translator\")\n", + "\n", + "\n", + "########################\n", + "# #### ✅✏️ Add a local Python function as a tool in the Agent\n", + "########################\n", + "\n", + "from cookbook.tools.local_function import LocalFunctionTool\n", + "from tools.sample_tool import sku_sample_translator\n", + "\n", + "# translate_sku_tool = LocalFunctionTool(func=translate_sku, description=\"Translates a pre-2024 SKU formatted as 'OLD-XXX-YYYY' to the new SKU format 'NEW-YYYY-XXX'.\")\n", + "\n", + "########################\n", + "#### ✅✏️ Agent's LLM configuration\n", + "########################\n", + "\n", + "system_prompt = \"\"\"\n", + "## Role\n", + "You are a helpful assistant that answers questions using a set of tools. If needed, you ask the user follow-up questions to clarify their request.\n", + "\n", + "## Objective\n", + "Your goal is to provide accurate, relevant, and helpful response based solely on the outputs from these tools. You are concise and direct in your responses.\n", + "\n", + "## Instructions\n", + "1. **Understand the Query**: Think step by step to analyze the user's question and determine the core need or problem. \n", + "\n", + "2. **Assess available tools**: Think step by step to consider each available tool and understand their capabilities in the context of the user's query.\n", + "\n", + "3. **Select the appropriate tool(s) OR ask follow up questions**: Based on your understanding of the query and the tool descriptions, decide which tool(s) should be used to generate a response. If you do not have enough information to use the available tools to answer the question, ask the user follow up questions to refine their request. 
If you do not have a relevant tool for a question or the outputs of the tools are not helpful, respond with: \"I'm sorry, I can't help you with that.\"\n", + "\"\"\".strip()\n", + "\n", + "fc_agent_config = FunctionCallingAgentConfig(\n", + " llm_config=LLMConfig(\n", + " llm_endpoint_name=\"ep-gpt4o-new\", # Model serving endpoint w/ a Chat Completions API\n", + " llm_system_prompt_template=system_prompt, # System prompt template\n", + " llm_parameters=LLMParametersConfig(\n", + " temperature=0.01, max_tokens=1500\n", + " ), # LLM parameters\n", + " ),\n", + " # Add one or more tools that comply with the CookbookTool interface\n", + " tools=[retriever_tool, translate_sku_tool],\n", + " # tools=[retriever_tool],\n", + ")\n", + "\n", + "# Print the configuration as a JSON string to see it all together\n", + "# print(json.dumps(fc_agent_config.model_dump(), indent=4))\n", + "\n", + "########################\n", + "##### Dump the configuration to a YAML\n", + "# Optional step, this allows the Agent's code file to be run by itself (e.g., outside of this notebook) using the above configuration.\n", + "########################\n", + "# Import the default YAML config file name from the Agent's code file\n", + "from cookbook.agents.function_calling_agent import FC_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME\n", + "\n", + "# Dump the configuration to a YAML file\n", + "serializable_config_to_yaml_file(fc_agent_config, \"./configs/\"+FC_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### ✅✏️ Optionally, adjust the Agent's code\n", + "\n", + "Here, we import the Agent's code so we can run the Agent locally within the notebook. To modify the code, open the Agent's code file in a separate window, enable reload, make your changes, and re-run this cell.\n", + "\n", + "**Typically, when building the first version of your agent, we suggest first trying to tune the configuration (prompts, etc) to improve quality. If you need more control to fix quality issues, you can then modify the Agent's code.**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from cookbook.agents.function_calling_agent import FunctionCallingAgent\n", + "import inspect\n", + "\n", + "# Print the Agent code for inspection\n", + "print(inspect.getsource(FunctionCallingAgent))" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 3" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### ✅✏️ 🅰 Vibe check the Agent for a single query\n", + "\n", + "Running this cell will produce an MLflow Trace that you can use to see the Agent's outputs and understand the steps it took to produce that output.\n", + "\n", + "If you are running in a local IDE, browse to the MLflow Experiment page to view the Trace (link to the Experiment UI is at the top of this notebook). If running in a Databricks Notebook, your trace will appear inline below." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from cookbook.databricks_utils import get_mlflow_experiment_traces_url\n", + "from cookbook.agents.function_calling_agent import FunctionCallingAgent\n", + "\n", + "# Load the Agent's code with the above configuration\n", + "agent = FunctionCallingAgent(agent_config=fc_agent_config)\n", + "\n", + "# Vibe check the Agent for a single query\n", + "output = agent.predict(model_input={\"messages\": [{\"role\": \"user\", \"content\": \"How does the blender work?\"}]})\n", + "# output = agent.predict(model_input={\"messages\": [{\"role\": \"user\", \"content\": \"Translate the sku `OLD-abs-1234` to the new format\"}]})\n", + "\n", + "print(f\"View the MLflow Traces at {get_mlflow_experiment_traces_url(experiment_info.experiment_id)}\")\n", + "print(f\"Agent's final response:\\n----\\n{output['content']}\\n----\")\n", + "print()\n", + "# print(f\"Agent's full message history (useful for debugging):\\n----\\n{json.dumps(output['messages'], indent=2)}\\n----\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's test a multi-turn conversation with the Agent." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "second_turn = {'messages': output['messages'] + [{\"role\": \"user\", \"content\": \"How do I turn it on?\"}]}\n", + "\n", + "# Run the Agent again with the same input to continue the conversation\n", + "second_turn_output = agent.predict(model_input=second_turn)\n", + "\n", + "print(f\"View the MLflow Traces at {get_mlflow_experiment_traces_url(experiment_info.experiment_id)}\")\n", + "print(f\"Agent's final response:\\n----\\n{second_turn_output['content']}\\n----\")\n", + "print()\n", + "print(f\"Agent's full message history (useful for debugging):\\n----\\n{json.dumps(second_turn_output['messages'], indent=2)}\\n----\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### ✅✏️ 🅱 Evaluate the Agent using your evaluation set\n", + "\n", + "Note: If you do not have an evaluation set, you can create a synthetic evaluation set by using the 03_synthetic_evaluation notebook." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "evaluation_set = spark.table(agent_storage_config.evaluation_set_uc_table)\n", + "\n", + "with mlflow.start_run():\n", + " logged_agent_info = log_function_calling_agent_to_mlflow(fc_agent_config)\n", + "\n", + " # Run the agent for these queries, using Agent evaluation to parallelize the calls\n", + " eval_results = mlflow.evaluate(\n", + " model=logged_agent_info.model_uri, # use the MLflow logged Agent\n", + " data=evaluation_set, # Evaluate the Agent for every row of the evaluation set\n", + " model_type=\"databricks-agent\", # use Agent Evaluation\n", + " )\n", + "\n", + " # Show all outputs. 
Click on a row in this table to display the MLflow Trace.\n", + "    display(eval_results.tables[\"eval_results\"])\n", + "\n", + "    # Click 'View Evaluation Results' to see the Agent's inputs/outputs + quality evaluation displayed in a UI" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2️⃣ Deploy a version of your Agent - either to the Review App or Production\n", + "\n", + "Once you have a version of your Agent that has sufficient quality, you will register the Agent's model from the MLflow Experiment into the Unity Catalog & use Agent Framework's `agents.deploy(...)` command to deploy it. Note these steps are the same for deploying to pre-production (e.g., the [Review App](https://docs.databricks.com/en/generative-ai/agent-evaluation/human-evaluation.html#review-app-ui)) or production.\n", + "\n", + "By the end of this step, you will have deployed a version of your Agent that you can interact with and share with your business stakeholders for feedback, even if they don't have access to your Databricks workspace:\n", + "\n", + "1. A production-ready, scalable REST API deployed as a Model Serving endpoint that logs every request/response/MLflow Trace to a Delta Table.\n", + "    - REST API for querying the Agent\n", + "    - REST API for sending user feedback from your UI to the Agent\n", + "2. Agent Evaluation's [Review App](https://docs.databricks.com/en/generative-ai/agent-evaluation/human-evaluation.html#review-app-ui) connected to these endpoints.\n", + "3. [Mosaic AI Playground](https://docs.databricks.com/en/large-language-models/ai-playground.html) connected to these endpoints." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Option 1: Deploy the last agent you logged above" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from databricks import agents\n", + "\n", + "# Use Unity Catalog as the model registry\n", + "mlflow.set_registry_uri(\"databricks-uc\")\n", + "\n", + "# Register the Agent's model to the Unity Catalog\n", + "uc_registered_model_info = mlflow.register_model(\n", + "    model_uri=logged_agent_info.model_uri, name=agent_storage_config.uc_model_name\n", + ")\n", + "\n", + "# Deploy the model to the review app and a model serving endpoint\n", + "agents.deploy(agent_storage_config.uc_model_name, uc_registered_model_info.version)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Option 2: Log the latest copy of the Agent's code/config and deploy it" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from databricks import agents\n", + "\n", + "# Use Unity Catalog as the model registry\n", + "mlflow.set_registry_uri(\"databricks-uc\")\n", + "\n", + "with mlflow.start_run():\n", + "    logged_agent_info = log_function_calling_agent_to_mlflow(fc_agent_config)\n", + "\n", + "    # Register the Agent's model to the Unity Catalog\n", + "    uc_registered_model_info = mlflow.register_model(\n", + "        model_uri=logged_agent_info.model_uri, name=agent_storage_config.uc_model_name\n", + "    )\n", + "\n", + "# Deploy the model to the review app and a model serving endpoint\n", + "# agents.deploy(agent_storage_config.uc_model_name, uc_registered_model_info.version)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Load the logged model to test it locally" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ +
"import mlflow\n", + "\n", + "loaded_model = mlflow.pyfunc.load_model(logged_agent_info.model_uri)\n", + "\n", + "loaded_model.predict({\"messages\": [{\"role\": \"user\", \"content\": \"A test question?\"}]})" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from databricks import agents\n", + "\n", + "# Use Unity Catalog as the model registry\n", + "mlflow.set_registry_uri(\"databricks-uc\")\n", + "\n", + "with mlflow.start_run():\n", + " logged_agent_info = log_agent_to_mlflow(fc_agent_config)" + ] + } + ], + "metadata": { + "application/vnd.databricks.v1+notebook": { + "dashboards": [], + "environmentMetadata": null, + "language": "python", + "notebookMetadata": {}, + "notebookName": "02_agent__function_calling_mlflow_sdk", + "widgets": {} + }, + "kernelspec": { + "display_name": "genai-cookbook-T2SdtsNM-py3.11", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/openai_sdk_agent_app_sample_code/06_multi_agent_with_genie.ipynb b/openai_sdk_agent_app_sample_code/06_multi_agent_with_genie.ipynb new file mode 100644 index 0000000..a79e6ae --- /dev/null +++ b/openai_sdk_agent_app_sample_code/06_multi_agent_with_genie.ipynb @@ -0,0 +1,648 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "31661828-f9bb-4fc2-a1bd-94424a27ed52", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "## 👉 START HERE: How to use this notebook\n", + "\n", + "# Step 3: Build, evaluate, & deploy your Agent\n", + "\n", + "Use this notebook to iterate on the code and configuration of your Agent.\n", + "\n", + "By the end of this notebook, you will have 1+ registered versions of your Agent, each coupled with a detailed quality evaluation.\n", + "\n", + "Optionally, you can deploy a version of your Agent that you can interact with in the [Mosiac AI Playground](https://docs.databricks.com/en/large-language-models/ai-playground.html) and let your business stakeholders who don't have Databricks accounts interact with it & provide feedback in the [Review App](https://docs.databricks.com/en/generative-ai/agent-evaluation/human-evaluation.html#review-app-ui).\n", + "\n", + "\n", + "For each version of your agent, you will have an MLflow run inside your MLflow experiment that contains:\n", + "- Your Agent's code & config\n", + "- Evaluation metrics for cost, quality, and latency" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "5d9f685a-fdb7-49a4-9e3a-a4a9e964d045", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "\n", + "**Important note:** Throughout this notebook, we indicate which cell's code you:\n", + "- ✅✏️ should customize - these cells contain code & config with business logic that you should edit to meet your requirements & tune quality.\n", + "- 🚫✏️ should not customize - these cells contain boilerplate code required to load/save/execute your Agent\n", + "\n", + "*Cells that don't require customization still need to be run! 
You CAN change these cells, but if this is the first time using this notebook, we suggest not doing so.*" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "bb4f8cc0-1797-4beb-a9f2-df21a9db79f0", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "### 🚫✏️ Install Python libraries\n", + "\n", + "You do not need to modify this cell unless you need additional Python packages in your Agent." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "6d4030e8-ae97-4351-bebd-9651d283578f", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "outputs": [], + "source": [ + "# %pip install -qqqq -U -r requirements.txt\n", + "# # Restart to load the packages into the Python environment\n", + "# dbutils.library.restartPython()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 🚫✏️ Connect to Databricks\n", + "\n", + "If running locally in an IDE using Databricks Connect, connect the Spark client & configure MLflow to use Databricks Managed MLflow. If this running in a Databricks Notebook, these values are already set." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from mlflow.utils import databricks_utils as du\n", + "\n", + "if not du.is_in_databricks_notebook():\n", + " from databricks.connect import DatabricksSession\n", + " import os\n", + "\n", + " spark = DatabricksSession.builder.getOrCreate()\n", + " os.environ[\"MLFLOW_TRACKING_URI\"] = \"databricks\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 🚫✏️ Load the Agent's UC storage locations; set up MLflow experiment\n", + "\n", + "This notebook uses the UC model, MLflow Experiment, and Evaluation Set that you specified in the [Agent setup](02_agent_setup.ipynb) notebook." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from cookbook.config.shared.agent_storage_location import AgentStorageConfig\n", + "from cookbook.databricks_utils import get_mlflow_experiment_url\n", + "from cookbook.config import load_serializable_config_from_yaml_file\n", + "import mlflow \n", + "\n", + "# Load the Agent's storage locations\n", + "agent_storage_config: AgentStorageConfig= load_serializable_config_from_yaml_file(\"./configs/agent_storage_config.yaml\")\n", + "\n", + "# Show the Agent's storage locations\n", + "agent_storage_config.pretty_print()\n", + "\n", + "# set the MLflow experiment\n", + "experiment_info = mlflow.set_experiment(agent_storage_config.mlflow_experiment_name)\n", + "# If running in a local IDE, set the MLflow experiment name as an environment variable\n", + "os.environ[\"MLFLOW_EXPERIMENT_NAME\"] = agent_storage_config.mlflow_experiment_name\n", + "\n", + "print(f\"View the MLflow Experiment `{agent_storage_config.mlflow_experiment_name}` at {get_mlflow_experiment_url(experiment_info.experiment_id)}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 🚫✏️ Helper method to log the Agent's code & config to MLflow\n", + "\n", + "Before we start, let's define a helper method to log the Agent's code & config to MLflow. We will use this to log the agent's code & config to MLflow & the Unity Catalog. 
It is used in evaluation & for deploying to Agent Evaluation's [Review App](https://docs.databricks.com/en/generative-ai/agent-evaluation/human-evaluation.html#review-app-ui) (a chat UI for your stakeholders to test this agent) and later, deplying the Agent to production." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "import mlflow\n", + "from mlflow.types.llm import CHAT_MODEL_INPUT_SCHEMA\n", + "from mlflow.models.rag_signatures import StringResponse\n", + "from cookbook.agents.utils.signatures import STRING_RESPONSE_WITH_MESSAGES\n", + "from mlflow.models.signature import ModelSignature\n", + "from cookbook.agents.multi_agent_supervisor import MultiAgentSupervisor, MultiAgentSupervisorConfig\n", + "from cookbook.agents.genie_agent import GenieAgent, GenieAgentConfig\n", + "from cookbook.agents.function_calling_agent import FunctionCallingAgent\n", + "from cookbook.agents.function_calling_agent import FunctionCallingAgentConfig\n", + "\n", + "# This helper will log the Agent's code & config to an MLflow run and return the logged model's URI\n", + "# If run from inside a mlfow.start_run() block, it will log to that run, otherwise it will log to a new run.\n", + "# This logged Agent is ready for deployment, so if you are happy with your evaluation, it is ready to deploy!\n", + "def log_multi_agent_supervisor_to_mlflow(agent_config: MultiAgentSupervisorConfig):\n", + " # Get the agent's code path from the imported Agent class\n", + " agent_code_path = f\"{os.getcwd()}/{MultiAgentSupervisor.__module__.replace('.', '/')}.py\"\n", + "\n", + " # Get the pip requirements from the requirements.txt file\n", + " with open(\"requirements.txt\", \"r\") as file:\n", + " pip_requirements = [line.strip() for line in file.readlines()] + [\"pyspark\"] # manually add pyspark\n", + "\n", + " logged_agent_info = mlflow.pyfunc.log_model(\n", + " artifact_path=\"agent\",\n", + " python_model=agent_code_path,\n", + " input_example=agent_config.input_example,\n", + " model_config=agent_config.model_dump(),\n", + " resources=agent_config.get_resource_dependencies(), # This allows the agents.deploy() command to securely provision credentials for the Agent's databricks resources e.g., vector index, model serving endpoints, etc\n", + " signature=ModelSignature(\n", + " inputs=CHAT_MODEL_INPUT_SCHEMA,\n", + " # outputs=STRING_RESPONSE_WITH_MESSAGES #TODO: replace with MLflow signature\n", + " outputs=StringResponse()\n", + " ),\n", + " code_paths=[os.path.join(os.getcwd(), \"cookbook\")],\n", + " pip_requirements=pip_requirements,\n", + " )\n", + "\n", + " return logged_agent_info\n", + "\n", + "def log_genie_agent_to_mlflow(agent_config: GenieAgentConfig):\n", + " # Get the agent's code path from the imported Agent class\n", + " agent_code_path = f\"{os.getcwd()}/{GenieAgent.__module__.replace('.', '/')}.py\"\n", + "\n", + " # Get the pip requirements from the requirements.txt file\n", + " with open(\"requirements.txt\", \"r\") as file:\n", + " pip_requirements = [line.strip() for line in file.readlines()] + [\"pyspark\"] # manually add pyspark\n", + "\n", + " logged_agent_info = mlflow.pyfunc.log_model(\n", + " artifact_path=\"agent\",\n", + " python_model=agent_code_path,\n", + " input_example=agent_config.input_example,\n", + " model_config=agent_config.model_dump(),\n", + " resources=agent_config.get_resource_dependencies(), # This allows the agents.deploy() command to securely provision credentials for the Agent's databricks resources 
e.g., vector index, model serving endpoints, etc\n", + " signature=ModelSignature(\n", + " inputs=CHAT_MODEL_INPUT_SCHEMA,\n", + " # outputs=STRING_RESPONSE_WITH_MESSAGES #TODO: replace with MLflow signature\n", + " outputs=StringResponse()\n", + " ),\n", + " code_paths=[os.path.join(os.getcwd(), \"cookbook\")],\n", + " pip_requirements=pip_requirements,\n", + " )\n", + "\n", + " return logged_agent_info\n", + "\n", + "# This helper will log the Agent's code & config to an MLflow run and return the logged model's URI\n", + "# If run from inside a mlfow.start_run() block, it will log to that run, otherwise it will log to a new run.\n", + "# This logged Agent is ready for deployment, so if you are happy with your evaluation, it is ready to deploy!\n", + "def log_function_calling_agent_to_mlflow(agent_config: FunctionCallingAgentConfig):\n", + " # Get the agent's code path from the imported Agent class\n", + " agent_code_path = f\"{os.getcwd()}/{FunctionCallingAgent.__module__.replace('.', '/')}.py\"\n", + "\n", + " # Get the pip requirements from the requirements.txt file\n", + " with open(\"requirements.txt\", \"r\") as file:\n", + " pip_requirements = [line.strip() for line in file.readlines()] + [\"pyspark\"] # manually add pyspark\n", + "\n", + " logged_agent_info = mlflow.pyfunc.log_model(\n", + " artifact_path=\"agent\",\n", + " python_model=agent_code_path,\n", + " input_example=agent_config.input_example,\n", + " model_config=agent_config.model_dump(),\n", + " resources=agent_config.get_resource_dependencies(), # This allows the agents.deploy() command to securely provision credentials for the Agent's databricks resources e.g., vector index, model serving endpoints, etc\n", + " signature=ModelSignature(\n", + " inputs=CHAT_MODEL_INPUT_SCHEMA,\n", + " # outputs=STRING_RESPONSE_WITH_MESSAGES #TODO: replace with MLflow signature\n", + " outputs=StringResponse()\n", + " ),\n", + " code_paths=[os.path.join(os.getcwd(), \"cookbook\")],\n", + " pip_requirements=pip_requirements,\n", + " )\n", + "\n", + " return logged_agent_info" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "9933d05f-29fa-452e-abdc-2a02328fbe22", + "showTitle": false, + "tableResultSettingsMap": {}, + "title": "" + } + }, + "source": [ + "\n", + "## 1️⃣ Iterate on the Agent's code & config to improve quality\n", + "\n", + "The below cells are used to execute your inner dev loop to improve the Agent's quality.\n", + "\n", + "We suggest the following process:\n", + "1. Vibe check the Agent for 5 - 10 queries to verify it works\n", + "2. Make any necessary changes to the code/config\n", + "3. Use Agent Evaluation to evaluate the Agent using your evaluation set, which will provide a quality assessment & identify the root causes of any quality issues\n", + "4. Based on that evaluation, make & test changes to the code/config to improve quality\n", + "5. 🔁 Repeat steps 3 and 4 until you are satisified with the Agent's quality\n", + "6. Deploy the Agent to Agent Evaluation's [Review App](https://docs.databricks.com/en/generative-ai/agent-evaluation/human-evaluation.html#review-app-ui) for pre-production testing\n", + "7. Use the following notebooks to review that feedback (optionally adding new records to your evaluation set) & identify any further quality issues\n", + "8. 🔁 Repeat steps 3 and 4 to fix any issues identified in step 7\n", + "9. 
Deploy the Agent to a production-ready REST API endpoint (using the same cells in this notebook as step 6)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create the agents to be overseen by the multi-agent supervisor" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1. create the genie agent" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "from cookbook.config.agents.genie_agent import GenieAgentConfig\n", + "from cookbook.agents.genie_agent import GENIE_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME, GenieAgent\n", + "from cookbook.config import serializable_config_to_yaml_file\n", + "\n", + "\n", + "genie_agent_config = GenieAgentConfig(genie_space_id=\"01ef92e3b5631f0da85834290964831d\")\n", + "serializable_config_to_yaml_file(genie_agent_config, \"./configs/\"+GENIE_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mlflow.set_registry_uri(\"databricks-uc\")\n", + "\n", + "with mlflow.start_run(run_name=\"genie_agent_test_1\"):\n", + " logged_genie_info = log_genie_agent_to_mlflow(genie_agent_config)\n", + " uc_registered_model_info = mlflow.register_model(\n", + " model_uri=logged_genie_info.model_uri, name=agent_storage_config.uc_model_name+\"_genie_test_1\"\n", + " )\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. create the FC agent" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "# Import Cookbook Agent configurations, which are Pydantic models\n", + "from cookbook.config import serializable_config_to_yaml_file\n", + "from cookbook.config.agents.function_calling_agent import (\n", + " FunctionCallingAgentConfig,\n", + ")\n", + "from cookbook.config.data_pipeline import (\n", + " DataPipelineConfig,\n", + ")\n", + "from cookbook.config.shared.llm import LLMConfig, LLMParametersConfig\n", + "from cookbook.config import load_serializable_config_from_yaml_file\n", + "from cookbook.tools.vector_search import (\n", + " VectorSearchRetrieverTool,\n", + " VectorSearchSchema,\n", + ")\n", + "import json\n", + "from cookbook.tools.uc_tool import UCTool\n", + "\n", + "\n", + "########################\n", + "# #### 🚫✏️ Load the Vector Index Unity Cataloglocation from the data pipeline configuration\n", + "# Usage:\n", + "# - If you used `01_data_pipeline` to create your Vector Index, run this cell.\n", + "# - If your Vector Index was created elsewhere, comment out this logic and set the UC location in the Retriever config.\n", + "########################\n", + "\n", + "data_pipeline_config: DataPipelineConfig = load_serializable_config_from_yaml_file(\n", + " \"./configs/data_pipeline_config.yaml\"\n", + ")\n", + "\n", + "########################\n", + "# #### ✅✏️ Retriever tool that connects to the Vector Search index\n", + "########################\n", + "\n", + "retriever_tool = VectorSearchRetrieverTool(\n", + " name=\"search_product_docs\",\n", + " description=\"Use this tool to search for product documentation.\",\n", + " vector_search_index=\"ep.cookbook_local_test.product_docs_docs_chunked_index__v1\",\n", + " vector_search_schema=VectorSearchSchema(\n", + " # These columns are the default values used in the `01_data_pipeline` notebook\n", + " # If you used a different column names in that notebook OR you are using a pre-built vector index, update the column 
names here.\n", + " chunk_text=\"content_chunked\", # Contains the text of each document chunk\n", + " document_uri=\"doc_uri\", # The document URI of the chunk e.g., \"/Volumes/catalog/schema/volume/file.pdf\" - displayed as the document ID in the Review App\n", + " additional_metadata_columns=[], # Additional columns to return from the vector database and present to the LLM\n", + " ),\n", + " # Optional parameters, see VectorSearchRetrieverTool.__doc__ for details. The default values are shown below.\n", + " # doc_similarity_threshold=0.0,\n", + " # vector_search_parameters=VectorSearchParameters(\n", + " # num_results=5,\n", + " # query_type=\"ann\"\n", + " # ),\n", + " # Adding columns here will allow the Agent's LLM to dynamically apply filters based on the user's query.\n", + " # filterable_columns=[]\n", + ")\n", + "\n", + "########################\n", + "# #### ✅✏️ Add Unity Catalog tools to the Agent\n", + "########################\n", + "\n", + "translate_sku_tool = UCTool(uc_function_name=\"ep.cookbook_local_test.translate_sku\")\n", + "\n", + "from tools.sku_translator import translate_sku\n", + "# from cookbook.config import serializable_config_to_yaml_file\n", + "\n", + "# translate_sku(\"OLD-XXX-1234\")\n", + "\n", + "from cookbook.tools.local_function import LocalFunctionTool\n", + "from tools.sku_translator import translate_sku\n", + "\n", + "# translate_sku_tool = LocalFunctionTool(func=translate_sku, description=\"Translates a pre-2024 SKU formatted as 'OLD-XXX-YYYY' to the new SKU format 'NEW-YYYY-XXX'.\")\n", + "\n", + "########################\n", + "#### ✅✏️ Agent's LLM configuration\n", + "########################\n", + "\n", + "system_prompt = \"\"\"\n", + "## Role\n", + "You are a helpful assistant that answers questions using a set of tools. If needed, you ask the user follow-up questions to clarify their request.\n", + "\n", + "## Objective\n", + "Your goal is to provide accurate, relevant, and helpful response based solely on the outputs from these tools. You are concise and direct in your responses.\n", + "\n", + "## Instructions\n", + "1. **Understand the Query**: Think step by step to analyze the user's question and determine the core need or problem. \n", + "\n", + "2. **Assess available tools**: Think step by step to consider each available tool and understand their capabilities in the context of the user's query.\n", + "\n", + "3. **Select the appropriate tool(s) OR ask follow up questions**: Based on your understanding of the query and the tool descriptions, decide which tool(s) should be used to generate a response. If you do not have enough information to use the available tools to answer the question, ask the user follow up questions to refine their request. 
If you do not have a relevant tool for a question or the outputs of the tools are not helpful, respond with: \"I'm sorry, I can't help you with that.\"\n", + "\"\"\".strip()\n", + "\n", + "fc_agent_config = FunctionCallingAgentConfig(\n", + " llm_config=LLMConfig(\n", + " llm_endpoint_name=\"ep-gpt4o-new\", # Model serving endpoint w/ a Chat Completions API\n", + " llm_system_prompt_template=system_prompt, # System prompt template\n", + " llm_parameters=LLMParametersConfig(\n", + " temperature=0.01, max_tokens=1500\n", + " ), # LLM parameters\n", + " ),\n", + " # Add one or more tools that comply with the CookbookTool interface\n", + " tools=[retriever_tool, translate_sku_tool],\n", + " # tools=[retriever_tool],\n", + ")\n", + "\n", + "# Print the configuration as a JSON string to see it all together\n", + "# print(json.dumps(fc_agent_config.model_dump(), indent=4))\n", + "\n", + "########################\n", + "##### Dump the configuration to a YAML\n", + "# Optional step, this allows the Agent's code file to be run by itself (e.g., outside of this notebook) using the above configuration.\n", + "########################\n", + "# Import the default YAML config file name from the Agent's code file\n", + "from cookbook.agents.function_calling_agent import FC_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME\n", + "\n", + "# Dump the configuration to a YAML file\n", + "serializable_config_to_yaml_file(fc_agent_config, \"./configs/\"+FC_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create the multi-agent supervisor" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "from cookbook.config.agents.multi_agent_supervisor import MultiAgentSupervisorConfig, SupervisedAgentConfig\n", + "from cookbook.config.agents.multi_agent_supervisor import MultiAgentSupervisorConfig\n", + "from cookbook.agents.multi_agent_supervisor import MULTI_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME\n", + "from cookbook.config.shared.llm import LLMConfig\n", + "from cookbook.config import serializable_config_to_yaml_file\n", + "from cookbook.agents.function_calling_agent import FunctionCallingAgent\n", + "from cookbook.config.shared.llm import LLMParametersConfig\n", + "\n", + "\n", + "fc_supervised = SupervisedAgentConfig(name=\"fc_agent\", \n", + " description=\"looks up product docs\", \n", + " endpoint_name=\"\", \n", + " agent_config=fc_agent_config,\n", + " agent_class=FunctionCallingAgent)\n", + "\n", + "genie_supervised = SupervisedAgentConfig(name=\"genie_agent\", \n", + " description=\"queries for customer info\", \n", + " endpoint_name=\"\", \n", + " agent_config=genie_agent_config,\n", + " agent_class=GenieAgent)\n", + "\n", + "\n", + "multi_agent_config = MultiAgentSupervisorConfig(\n", + " llm_endpoint_name=\"ep-gpt4o-new\",\n", + " llm_parameters=LLMParametersConfig(\n", + " max_tokens= 1500,\n", + " temperature= 0.01\n", + " ),\n", + "\n", + " playground_debug_mode=True,\n", + " agent_loading_mode=\"local\",\n", + " agents=[fc_supervised, genie_supervised]\n", + ")\n", + "\n", + "serializable_config_to_yaml_file(multi_agent_config, \"./configs/\"+MULTI_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from cookbook.databricks_utils import get_mlflow_experiment_traces_url\n", + "from cookbook.agents.multi_agent_supervisor import MultiAgentSupervisor\n", + "\n", + "# Load the Agent's code with the 
above configuration\n", + "agent = MultiAgentSupervisor(multi_agent_config)\n", + "\n", + "# Vibe check the Agent for a single query\n", + "output = agent.predict(model_input={\"messages\": [{\"role\": \"user\", \"content\": \"How does the blender work?\"}]})\n", + "# output = agent.predict(model_input={\"messages\": [{\"role\": \"user\", \"content\": \"Translate the sku `OLD-abs-1234` to the new format\"}]})\n", + "\n", + "print(f\"View the MLflow Traces at {get_mlflow_experiment_traces_url(experiment_info.experiment_id)}\")\n", + "print(f\"Agent's final response:\\n----\\n{output['content']}\\n----\")\n", + "print()\n", + "print(f\"Agent's full message history (useful for debugging):\\n----\\n{json.dumps(output['messages'], indent=2)}\\n----\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Design for multi-agent\n", + "\n", + "requirements\n", + "* can test locally with just the agent's pyfunc classes\n", + "* when you change any config, it all just reloads\n", + "\n", + "when you deploy:\n", + "* you deploy each supervised agent separately to model serving\n", + "* then mutli agent picks these up \n", + "* then mutli agent deploys\n", + "\n", + "* each child agent has [name, description, config, code]\n", + " - when deployed, it reads it from the UC\n", + " - locally, from the config" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Testing endpoint based " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from cookbook.config.agents.multi_agent_supervisor import MultiAgentSupervisorConfig, SupervisedAgentConfig\n", + "from cookbook.config.agents.multi_agent_supervisor import MultiAgentSupervisorConfig, MULTI_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME\n", + "# from cookbook.agents.multi_agent_supervisor import MULTI_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME\n", + "from cookbook.config.shared.llm import LLMConfig\n", + "from cookbook.config import serializable_config_to_yaml_file\n", + "from cookbook.agents.function_calling_agent import FunctionCallingAgent\n", + "from cookbook.config.shared.llm import LLMParametersConfig\n", + "\n", + "\n", + "fc_supervised_ep = SupervisedAgentConfig(name=\"fc_agent\", \n", + " description=\"looks up product docs\", \n", + " endpoint_name=\"agents_ep-cookbook_local_test-my_agent_new_test_with_ONLY_retri\", \n", + " # agent_config=fc_agent_config,\n", + " # agent_class=FunctionCallingAgent\n", + " )\n", + "\n", + "# genie_supervised = SupervisedAgentConfig(name=\"genie_agent\", \n", + "# description=\"queries for customer info\", \n", + "# endpoint_name=\"\", \n", + "# agent_config=genie_agent_config,\n", + "# agent_class=GenieAgent)\n", + "\n", + "\n", + "multi_agent_config_with_ep = MultiAgentSupervisorConfig(\n", + " llm_endpoint_name=\"ep-gpt4o-new\",\n", + " llm_parameters=LLMParametersConfig(\n", + " max_tokens= 1500,\n", + " temperature= 0.01\n", + " ),\n", + "\n", + " playground_debug_mode=True,\n", + " agent_loading_mode=\"model_serving\",\n", + " agents=[fc_supervised_ep]\n", + ")\n", + "\n", + "serializable_config_to_yaml_file(multi_agent_config_with_ep, \"./configs/\"+MULTI_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from cookbook.config import load_serializable_config_from_yaml_file\n", + "\n", + "multi_agent_config_with_ep_loaded = load_serializable_config_from_yaml_file(\"./configs/multi_agent_supervisor_config.yaml\")\n", + 
"\n", + "print(multi_agent_config_with_ep_loaded)" + ] + } + ], + "metadata": { + "application/vnd.databricks.v1+notebook": { + "dashboards": [], + "environmentMetadata": null, + "language": "python", + "notebookMetadata": {}, + "notebookName": "02_agent__function_calling_mlflow_sdk", + "widgets": {} + }, + "kernelspec": { + "display_name": "genai-cookbook-T2SdtsNM-py3.11", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/openai_sdk_agent_app_sample_code/README.md b/openai_sdk_agent_app_sample_code/README.md new file mode 100644 index 0000000..ab90eca --- /dev/null +++ b/openai_sdk_agent_app_sample_code/README.md @@ -0,0 +1,160 @@ +# How to use local IDE + +- databricks auth profile DEFAULT is set up +``` +databricks auth profile login +``` +- add a cluster_id in ~/.databrickscfg (if you want to use Spark code) +- add `openai_sdk_agent_app_sample_code/.env` to point to mlflow exp + dbx tracking uri (if you want to run any agent code from the terminal and have it logged to mlflow). Make sure this mlflow experiment maps to the one in 02_agent_setup.ipynb. +``` +MLFLOW_TRACKING_URI=databricks +MLFLOW_EXPERIMENT_NAME=/Users/your.name@company.com/my_agent_mlflow_experiment +``` +- install poetry env & activate in your IDE +``` +poetry install +``` + +if you want to use the data pipeline code in spark, you need to build the cookbook wheel and install it in the cluster +- build cookbook wheel +``` +poetry build +``` +- install cookbook wheel in cluster + - Copy the wheel file to a UC Volume or Workspace folder + - Go to the cluster's Libraries page and install the wheel file as a new library + + +# NOTES/what doesn't work: +- Works locally & once deployed: + - Tool calling agent with vector search, UC tool +- Works locally, deployment not tested. + - Genie Agent + - Multi-Agent supervisor w/ "local" mode + - Multi-Agent supervisor w/ "endpoint" mode + +# TODO: +- Refactor the cookbook folder to + - make it easy to add as `code_path` without putting all agent code + data pipeline code into the agent mlflow model + - make the data pipeline competely seperate + - make the tools inherit from a version of serializableConfig that is "serializableTool" - same exact thing just not overloaded. + +- Multi-agent + - test with deployed endpoints + - make deployed endpoint optional if model = local, otherwise, make class/config optional. + +- Create a version of each of these Agents with LangGraph, LlamaIndex, and AutoGen. + +# Docs + +This cookbook contains example Agents built using Python code + the OpenAI SDK to call Databricks Model Serving/External Models. Each Agent is configurable via a Pydantic-based configuration classes and is wrapped in an MLflow PyFunc class for logging and deployment. + +Included are 3 types of Agents: +- Tool Calling Agent +- Genie Agent +- Multi-Agent Supervisor Agent + +## Genie Agent + +The Genie Agent is a simple wrapper around AI/BI Genie Spaces API. It does not use the OpenAI SDK. 
+
+## Tool-calling Agent
+
+The tool-calling agent uses the configured LLM to decide which tool(s) to call based on the user's query.
+
+The agent is configured using the `FunctionCallingAgentConfig` class:
+
+- Required:
+ - `llm_config: LLMConfig` - Configuration for the LLM endpoint
+ - `tools: List[BaseTool]` - List of tools available to the agent.
+
+- Optional Variables with Default Values:
+ - `input_example: Any` - Defaults to:
+ ```python
+ {
+ "messages": [
+ {
+ "role": "user",
+ "content": "What can you help me with?",
+ },
+ ]
+ }
+ ```
+
+The `LLMConfig` requires:
+- `llm_endpoint_name: str` - Name of the model serving endpoint
+- `llm_system_prompt_template: str` - System prompt for the LLM
+- `llm_parameters: Dict` - Parameters for the LLM (temperature, max_tokens, etc.)
+
+The `BaseTool` class is used to define a tool that the agent can call. The cookbook includes several pre-built tools. If you need to create your own tool, we suggest creating a UC Function and calling that function using the `UCTool`.
+- UC Tool
+ - Wraps the UC toolkit. Adds additional code to parse errors from Spark exceptions so that only the Python error is shown.
+- Vector Search Retriever Tool
+ - A retriever tool that queries a Databricks Vector Search index (see the Vector Search Retriever Tool section below).
+
+
+## How Pydantic configuration classes work
+All configuration classes inherit from `SerializableConfig`, defined in `config/__init__.py`. This class enables a Pydantic BaseModel to be serialized to a YAML file and loaded back from that YAML file.
+
+
+## UC Function Tool
+
+## Local Function Tool
+
+## Vector Search Retriever Tool
+
+Issues:
+- The Vector Search index does not store the source table's column name / description metadata, so the tool currently uses the source table's metadata to populate the filterable columns. However, this causes deployment to fail since the deployed model does not have access to the source table, so it is toggled off by `USE_SOURCE_TABLE_FOR_METADATA`.
+
+Features:
+* The user can specify a list of filterable columns; these are presented to the tool-calling LLM as parameters of the tool.
+* Validates that all provided columns exist.
+
+
+What do you need to do?
+
+- Make your data pipeline
+- Create your Genie Spaces
+- Create your tools
+- Create your agents
+- Create your multi-agent supervisor
+
+
+Create an unstructured data agent:
+- Create the data pipeline
+- Create synthetic data
+- Create an agent with a retriever tool
+- Evaluate and iterate
+- Maybe add some tools
diff --git a/openai_sdk_agent_app_sample_code/__init__.py b/openai_sdk_agent_app_sample_code/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/openai_sdk_agent_app_sample_code/configs/README.md b/openai_sdk_agent_app_sample_code/configs/README.md
new file mode 100644
index 0000000..afa743f
--- /dev/null
+++ b/openai_sdk_agent_app_sample_code/configs/README.md
@@ -0,0 +1 @@
+This folder stores the configurations generated by the cookbook notebooks.
\ No newline at end of file diff --git a/openai_sdk_agent_app_sample_code/cookbook/__init__.py b/openai_sdk_agent_app_sample_code/cookbook/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/openai_sdk_agent_app_sample_code/cookbook/agents/__init__.py b/openai_sdk_agent_app_sample_code/cookbook/agents/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/openai_sdk_agent_app_sample_code/cookbook/agents/function_calling_agent.py b/openai_sdk_agent_app_sample_code/cookbook/agents/function_calling_agent.py new file mode 100644 index 0000000..359e406 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/agents/function_calling_agent.py @@ -0,0 +1,241 @@ +# In this file, we construct a function-calling Agent with a Retriever tool using MLflow + the OpenAI SDK connected to Databricks Model Serving. This Agent is encapsulated in a MLflow PyFunc class called `FunctionCallingAgent()`. + +# Add the parent directory to the path so we can import the `cookbook` modules +# import sys +# sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) + + +import json +from typing import Any, Dict, List, Optional, Union +import mlflow +import pandas as pd +from mlflow.models import set_model +from mlflow.models.rag_signatures import StringResponse, ChatCompletionRequest +from databricks.sdk import WorkspaceClient +from cookbook.agents.utils.execute_function import execute_function + +from cookbook.agents.utils.chat import ( + get_messages_array, + extract_user_query_string, + extract_chat_history, +) +from cookbook.config.agents.function_calling_agent import ( + FunctionCallingAgentConfig, +) +from cookbook.agents.utils.execute_function import execute_function +from cookbook.agents.utils.load_config import load_config +import logging + +FC_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME = "function_calling_agent_config.yaml" + + +class FunctionCallingAgent(mlflow.pyfunc.PythonModel): + """ + Class representing an Agent that does function-calling with tools using OpenAI SDK + """ + + def __init__( + self, agent_config: Optional[Union[FunctionCallingAgentConfig, str]] = None + ): + super().__init__() + # Empty variables that will be initialized after loading the agent config. + self.model_serving_client = None + self.tool_functions = None + self.tool_json_schemas = None + self.chat_history = None + self.agent_config = None + + # load the Agent's configuration. See load_config() for details. + self.agent_config = load_config( + passed_agent_config=agent_config, + default_config_file_name=FC_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME, + ) + if not self.agent_config: + logging.error( + f"No agent config found. If you are in your local development environment, make sure you either [1] are calling init(agent_config=...) with either an instance of FunctionCallingAgentConfig or the full path to a YAML config file or [2] have a YAML config file saved at {{your_project_root_folder}}/configs/{FC_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME}." 
+ ) + else: + logging.info("Successfully loaded agent config in __init__.") + + # Now, initialize the rest of the Agent + w = WorkspaceClient() + self.model_serving_client = w.serving_endpoints.get_open_ai_client() + + # Initialize the tools + self.tool_functions = {} + self.tool_json_schemas = [] + for tool in self.agent_config.tools: + self.tool_functions[tool.name] = tool + self.tool_json_schemas.append(tool.get_json_schema()) + + # Initialize the chat history to empty + self.chat_history = [] + + @mlflow.trace(name="agent", span_type="AGENT") + def predict( + self, + context: Any = None, + model_input: Union[ChatCompletionRequest, Dict, pd.DataFrame] = None, + params: Any = None, + ) -> StringResponse: + # Check here to allow the Agent class to be initialized without a configuration file, which is required to import the class as a module in other files. + if not self.agent_config: + raise RuntimeError("Agent config not loaded. Cannot call predict()") + + ############################################################################## + # Extract `messages` key from the `model_input` + messages = get_messages_array(model_input) + + ############################################################################## + # Parse `messages` array into the user's query & the chat history + with mlflow.start_span(name="parse_input", span_type="PARSER") as span: + span.set_inputs({"messages": messages}) + # in a multi-agent setting, the last message can be from another assistant, not the user + last_message = extract_user_query_string(messages) + last_message_role = messages[-1]["role"] + # Save the history inside the Agent's internal state + self.chat_history = extract_chat_history(messages) + span.set_outputs( + { + "last_message": last_message, + "chat_history": self.chat_history, + "last_message_role": last_message_role, + } + ) + + ############################################################################## + # Call LLM + + # messages to send the model + # For models with shorter context length, you will need to trim this to ensure it fits within the model's context length + system_prompt = self.agent_config.llm_config.llm_system_prompt_template + messages = ( + [{"role": "system", "content": system_prompt}] + + self.chat_history # append chat history for multi turn + + [{"role": last_message_role, "content": last_message}] + ) + + # Call the LLM to recursively calls tools and eventually deliver a generation to send back to the user + ( + model_response, + messages_log_with_tool_calls, + ) = self.recursively_call_and_run_tools(messages=messages) + + # If your front end keeps of converastion history and automatically appends the bot's response to the messages history, remove this line. + messages_log_with_tool_calls.append( + model_response.choices[0].message.to_dict() + ) # OpenAI client + + # remove the system prompt - this should not be exposed to the Agent caller + messages_log_with_tool_calls = messages_log_with_tool_calls[1:] + + return { + "content": model_response.choices[0].message.content, + # messages should be returned back to the Review App (or any other front end app) and stored there so it can be passed back to this stateless agent with the next turns of converastion. 
+ "messages": messages_log_with_tool_calls, + } + + @mlflow.trace(span_type="AGENT") + def recursively_call_and_run_tools(self, max_iter=10, **kwargs): + messages = kwargs["messages"] + del kwargs["messages"] + i = 0 + while i < max_iter: + response = self.chat_completion(messages=messages, tools=True) + assistant_message = response.choices[0].message # openai client + tool_calls = assistant_message.tool_calls # openai + if tool_calls is None: + # the tool execution finished, and we have a generation + return (response, messages) + tool_messages = [] + for tool_call in tool_calls: # TODO: should run in parallel + function = tool_call.function # openai + args = json.loads(function.arguments) # openai + result = execute_function(self.tool_functions[function.name], args) + tool_message = { + "role": "tool", + "tool_call_id": tool_call.id, + "content": result, + } # openai + + tool_messages.append(tool_message) + assistant_message_dict = assistant_message.dict().copy() # openai + del assistant_message_dict["content"] + del assistant_message_dict["function_call"] # openai only + if "audio" in assistant_message_dict: + del assistant_message_dict["audio"] # llama70b hack + messages = ( + messages + + [ + assistant_message_dict, + ] + + tool_messages + ) + i += 1 + # TODO: Handle more gracefully + raise "ERROR: max iter reached" + + def chat_completion(self, messages: List[Dict[str, str]], tools: bool = False): + endpoint_name = self.agent_config.llm_config.llm_endpoint_name + llm_options = self.agent_config.llm_config.llm_parameters.dict() + + # # Trace the call to Model Serving - openai versio + traced_create = mlflow.trace( + self.model_serving_client.chat.completions.create, + name="chat_completions_api", + span_type="CHAT_MODEL", + ) + + if tools: + return traced_create( + model=endpoint_name, + messages=messages, + tools=self.tool_json_schemas, + parallel_tool_calls=False, + **llm_options, + ) + else: + return traced_create(model=endpoint_name, messages=messages, **llm_options) + + +logging.basicConfig(level=logging.INFO) + +# tell MLflow logging where to find the agent's code +set_model(FunctionCallingAgent()) + + +# IMPORTANT: set this to False before logging the model to MLflow +debug = False + +if debug: + # logging.basicConfig(level=logging.INFO) + # print(find_config_folder_location()) + # print(os.path.abspath(os.getcwd())) + # mlflow.tracing.disable() + agent = FunctionCallingAgent() + + vibe_check_query = { + "messages": [ + # {"role": "user", "content": f"what is agent evaluation?"}, + # {"role": "user", "content": f"How does the blender work?"}, + # { + # "role": "user", + # "content": f"find all docs from the section header 'Databricks documentation archive' or 'Work with files on Databricks'", + # }, + { + "role": "user", + "content": "Translate the sku `OLD-abs-1234` to the new format", + } + # { + # "role": "user", + # "content": f"convert sku 'OLD-XXX-1234' to the new format", + # }, + # { + # "role": "user", + # "content": f"what are recent customer issues? 
what words appeared most frequently?", + # }, + ] + } + + output = agent.predict(model_input=vibe_check_query) + print(output["content"]) diff --git a/openai_sdk_agent_app_sample_code/cookbook/agents/genie_agent.py b/openai_sdk_agent_app_sample_code/cookbook/agents/genie_agent.py new file mode 100644 index 0000000..b158f7a --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/agents/genie_agent.py @@ -0,0 +1,500 @@ +# Databricks notebook source +# MAGIC %md +# MAGIC # Genie Space Agent +# MAGIC +# MAGIC In this notebook, we construct a Genie space as an Agent. This Agent is encapsulated in a MLflow PyFunc class called `GenieAgent()`. + +# COMMAND ---------- + +# # # If running this notebook by itself, uncomment these. +# %pip install --upgrade -qqqq mlflow databricks-sdk tabulate tiktoken +# dbutils.library.restartPython() + +# COMMAND ---------- + +import sys + +# Add the parent directory to the path so we can import the `utils` modules +sys.path.append("../..") + +import json +from typing import Any, Dict, Optional, Union +import mlflow +from dataclasses import asdict, dataclass +import pandas as pd +from mlflow.models import set_model +from mlflow.models.rag_signatures import StringResponse, ChatCompletionRequest +import tiktoken +import logging +import uuid +import time +import os +from datetime import datetime +from typing import Union +import pandas as pd +from cookbook.config.agents.genie_agent import GenieAgentConfig +from cookbook.agents.utils.load_config import load_first_yaml_file +from cookbook.agents.utils.chat import ( + get_messages_array, + extract_user_query_string, + extract_chat_history, + convert_messages_to_open_ai_format, + concat_messages_array_to_string, +) +from databricks.sdk import WorkspaceClient +from cookbook.agents.utils.load_config import load_config + + +# COMMAND ---------- + +GENIE_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME = "genie_agent_config.yaml" + +# COMMAND ---------- + +MAX_TOKENS_OF_DATA = 20000 # max tokens of data in markdown format +MAX_ITERATIONS = 50 # max times to poll the API when polling for either result or the query results, each iteration is ~1 second, so max latency == 2 * MAX_ITERATIONS + + +@mlflow.trace(span_type="PARSER") +def _parse_query_result(resp) -> Union[str, pd.DataFrame]: + columns = resp["manifest"]["schema"]["columns"] + header = [str(col["name"]) for col in columns] + rows = [] + output = resp["result"] + if not output: + return None + + for item in resp["result"]["data_typed_array"]: + row = [] + for column, value in zip(columns, item["values"]): + type_name = column["type_name"] + str_value = value.get("str", None) + if str_value is None: + row.append(None) + continue + + if type_name in ["INT", "LONG", "SHORT", "BYTE"]: + row.append(int(str_value)) + elif type_name in ["FLOAT", "DOUBLE", "DECIMAL"]: + row.append(float(str_value)) + elif type_name == "BOOLEAN": + row.append(str_value.lower() == "true") + elif type_name == "DATE": + row.append(datetime.strptime(str_value[:10], "%Y-%m-%d").date()) + elif type_name == "TIMESTAMP": + row.append(datetime.strptime(str_value[:10], "%Y-%m-%d").date()) + elif type_name == "BINARY": + row.append(bytes(str_value, "utf-8")) + else: + row.append(str_value) + + rows.append(row) + + # initial parsing to markdown + query_result = pd.DataFrame(rows, columns=header).to_markdown() + + # trim down from the total rows + trimmed_rows = len(rows) + tokens_used = count_tokens(query_result) + + # if the first iteration is < MAX_TOKENS_OF_DATA it will just return and skip this loop + while 
trimmed_rows > 0 and tokens_used > MAX_TOKENS_OF_DATA: + with mlflow.start_span(name="reduce_data_tokens") as span: + span.set_inputs( + { + "output_rows_to_show": trimmed_rows, + "max_tokens_target": MAX_TOKENS_OF_DATA, + } + ) + # convert to markdown + query_result = ( + pd.DataFrame(rows, columns=header).head(trimmed_rows).to_markdown() + ) + # keep trimming down until we get under the token limit + trimmed_rows -= 5 + # worst case, return None, which the Agent will handle and not display the query results + tokens_used = count_tokens(query_result) + if trimmed_rows == 0: + query_result = None + tokens_used = 0 + span.set_outputs({"query_result": query_result, "tokens_used": tokens_used}) + return query_result.strip() if query_result else query_result + + +# Define a function to count tokens +def count_tokens(text): + encoding = tiktoken.encoding_for_model("gpt-4o") + return len(encoding.encode(text)) + + +@dataclass +class GenieResponse: + sql_query: str = None # generated sql query + response: str = ( + None # description of the sql query or Genie's response back to the user + ) + data_table: str = None # datatable returned formatted as markdown by pandas + + +class GenieAPIWrapper: + def __init__( + self, + space_id, + encountered_error_user_message: str = "I encountered an error trying to answer your question, please try again.", + ): + self.space_id = space_id + + self.headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + workspace_client = WorkspaceClient() + self._genie_client = workspace_client.genie + self.encountered_error_user_message = encountered_error_user_message + + # We build the GenieResponse throughout this wrapper's logic since you must poll for the result & the results come back from multiple polling requests. + self.genie_result = GenieResponse() + + @mlflow.trace() + def start_conversation(self, content): + resp = self._genie_client._api.do( + "POST", + f"/api/2.0/genie/spaces/{self.space_id}/start-conversation", + body={"content": content}, + headers=self.headers, + ) + return resp + + @mlflow.trace() + def create_message(self, conversation_id, content): + resp = self._genie_client._api.do( + "POST", + f"/api/2.0/genie/spaces/{self.space_id}/conversations/{conversation_id}/messages", + body={"content": content}, + headers=self.headers, + ) + return resp + + @mlflow.trace() + def poll_for_result(self, conversation_id, message_id): + @mlflow.trace() + def poll_result(): + iteration_count = 0 + while True and iteration_count < MAX_ITERATIONS: + # try: # genie API randomly crashes with BadRequest: Message does not have a query statementId. This is instead caught in the Agent itself to capture all unknown exceptions from the API wrapper. 
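+ # The loop below is a small polling state machine over resp["status"]:
+ #   - EXECUTING_QUERY: capture the generated SQL and its description, then poll for the query results.
+ #   - COMPLETED: Genie returned either a plain-text answer or a query attachment; handle both.
+ #   - FAILED (or an unrecognized attachment/state): return the configured error message to the user.
+ #   - anything else: wait ~1 second and poll again, up to MAX_ITERATIONS.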
+ iteration_count += 1 + logging.debug( + f"Polling for result {message_id} {conversation_id} iteration {iteration_count}" + ) + resp = self._genie_client._api.do( + "GET", + f"/api/2.0/genie/spaces/{self.space_id}/conversations/{conversation_id}/messages/{message_id}", + headers=self.headers, + ) + logging.debug(f"Genie polling response: {resp}") + if resp["status"] == "EXECUTING_QUERY": + with mlflow.start_span(name="get_sql_query") as span: + query_result = next( + r for r in resp["attachments"] if "query" in r + )["query"] + span.set_inputs(resp) + self.genie_result.sql_query = query_result.get("query") + self.genie_result.response = query_result.get("description") + span.set_outputs( + { + "sql_query": self.genie_result.sql_query, + "response": self.genie_result.response, + } + ) + return poll_query_results() + elif resp["status"] == "COMPLETED": + """ + Genie didn't run a query, returned a question or comment to the user + """ + with mlflow.start_span(name="get_genie_response") as span: + logging.debug(f"Genie polling returned {resp}") + span.set_inputs(resp) + # Get first attachment from array safely + first_attachment = ( + resp.get("attachments", [])[0] + if resp.get("attachments") + else None + ) + if first_attachment: + # TODO: we shouldn't need this logic, but it's here to handle a bug in the Genie API where sometimes you get COMPLETED before EXECUTING_QUERY is returned. + if "text" in first_attachment: + # genie didn't run a query, just returned a question or comment to the user + response = first_attachment["text"]["content"] + self.genie_result.response = response + span.set_outputs( + {"response": self.genie_result.response} + ) + return asdict(self.genie_result) + elif "query" in first_attachment: + # genie ran a query, get the results + response = first_attachment["query"]["description"] + self.genie_result.sql_query = first_attachment["query"][ + "query" + ] + self.genie_result.response = first_attachment["query"][ + "description" + ] + span.set_outputs( + { + "sql_query": self.genie_result.sql_query, + "response": self.genie_result.response, + } + ) + return poll_query_results() + else: + # unknown state, assume an error state + self.genie_result.response = ( + self.encountered_error_user_message + ) + span.set_outputs( + {"response": self.genie_result.response} + ) + return asdict(self.genie_result) + else: + # no response, must be an error state + self.genie_result.response = ( + self.encountered_error_user_message + ) + span.set_outputs({"response": self.genie_result.response}) + return asdict(self.genie_result) + + elif resp["status"] == "FAILED": + """ + Genie failed + """ + self.genie_result.response = self.encountered_error_user_message + return asdict(self.genie_result) + else: + logging.debug(f"Waiting...: {resp['status']}") + time.sleep(1) + # except Exception as e: # hack per above + # logging.error( + # f"Error polling for result: {e}, in polling iteration {iteration_count} of {MAX_ITERATIONS}" + # ) + # print(iteration_count) + # continue + + @mlflow.trace() + def poll_query_results(): + iteration_count = 0 + while True and iteration_count < MAX_ITERATIONS: + iteration_count += 1 + resp = self._genie_client._api.do( + "GET", + f"/api/2.0/genie/spaces/{self.space_id}/conversations/{conversation_id}/messages/{message_id}/query-result", + headers=self.headers, + )["statement_response"] + + state = resp["status"]["state"] + if state == "SUCCEEDED": + with mlflow.start_span(name="get_sql_query_results") as span: + span.set_inputs(resp) + data_table_as_md = 
_parse_query_result(resp)
+ self.genie_result.data_table = data_table_as_md
+ span.set_outputs(self.genie_result.data_table)
+ return asdict(self.genie_result)
+ elif state == "RUNNING" or state == "PENDING":
+ logging.debug("Waiting for query result...")
+ time.sleep(1)
+ else:
+ logging.debug(f"No query result: {resp['state']}")
+ return None
+
+ return poll_result()
+
+ @mlflow.trace(span_type="AGENT", name="genie")
+ def ask_question(self, question):
+ self.genie_result = GenieResponse()
+ resp = self.start_conversation(question)
+ return self.poll_for_result(resp["conversation_id"], resp["message_id"])
+
+
+# COMMAND ----------
+
+
+# DBTITLE 1,Agent
+class GenieAgent(mlflow.pyfunc.PythonModel):
+ """
+ Class representing an Agent that wraps an AI/BI Genie Space as an MLflow PyFunc model
+ """
+
+ def __init__(
+ self,
+ agent_config: Optional[Union[GenieAgentConfig, str]] = None,
+ ):
+ # load the Agent's configuration. See load_config() for details.
+ self.agent_config = load_config(
+ passed_agent_config=agent_config,
+ default_config_file_name=GENIE_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME,
+ )
+ if not self.agent_config:
+ raise ValueError(
+ f"No agent config found. If you are in your local development environment, make sure you either [1] are calling init(agent_config=...) with either an instance of GenieAgentConfig or the full path to a YAML config file or [2] have a YAML config file saved at {{your_project_root_folder}}/configs/{GENIE_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME}."
+ )
+ else:
+ logging.info("Successfully loaded agent config in __init__.")
+ # Load the API wrapper
+ self._genie_agent = GenieAPIWrapper(self.agent_config.genie_space_id)
+
+ self.chat_history = []
+
+ @mlflow.trace(name="genie_orchestrator", span_type="AGENT")
+ def predict(
+ self,
+ context: Any = None,
+ model_input: Union[ChatCompletionRequest, Dict, pd.DataFrame] = None,
+ params: Any = None,
+ ) -> StringResponse:
+ # Check here to allow the Agent class to be initialized without a configuration file, which is required to import the class as a module in other files.
+ if not self.agent_config:
+ raise RuntimeError("Agent config not loaded. Cannot call predict()")
+ ##############################################################################
+ # Extract `messages` key from the `model_input`
+ messages = get_messages_array(model_input)
+
+ ##############################################################################
+ # Parse `messages` array into the user's query & the chat history
+ with mlflow.start_span(name="parse_input", span_type="PARSER") as span:
+ span.set_inputs({"messages": messages})
+ # in a multi-agent setting, the last message can be from another assistant, not the user
+ last_message = extract_user_query_string(messages)
+ last_message_role = messages[-1]["role"]
+ # Save the history inside the Agent's internal state
+ self.chat_history = extract_chat_history(messages)
+ span.set_outputs(
+ {
+ "last_message": last_message,
+ "chat_history": self.chat_history,
+ "last_message_role": last_message_role,
+ }
+ )
+
+ # HACK: Since the Genie API doesn't provide a stateless way to pass in the chat history, we "prompt hack" Genie by adding the chat history to the user's query.
+ # This avoids the need for this agent to maintain the Genie conversation ID between turns - which is impractical since this Agent is deployed as a stateless API.
+ if len(self.chat_history) > 0:
+ message = f"I will provide you a chat, where your name is 'assistant' and the user is 'user'. 
Please help with the user's last query. DO NOT reference the query or context in your response.\n" + # message += f"Chat history length: {len(history_as_string)} characters\n" + + # Concatenate messages to form the chat history + # message += concat_messages_array_to_string(messages) + + history_as_string = concat_messages_array_to_string(messages) + + # Genie API has a character limit of 25,000 + # Iteratively remove oldest messages to get it small enough + messages_copy = messages.copy() + while ( + len(history_as_string) + len(message) > 25000 and len(messages_copy) > 1 + ): + + messages_copy = messages_copy[1:] + history_as_string = concat_messages_array_to_string(messages_copy) + + # If we still exceed the limit with just one message, fall back to just the user query + if len(history_as_string) + len(message) > 25000: + message = last_message + else: + message += history_as_string + else: + message = last_message + + # if the user's original message is too long (very unlikely), we just truncate the message + if len(message) > 25000: + message = message[:25000] + logging.warning( + f"Truncated message to {len(message)} characters; even with just 1 message it was still too long." + ) + + # Send the message and wait for a response + try: + genie_response = self._genie_agent.ask_question(message) + except ( + Exception + ) as e: # genie API randomly crashes with BadRequest: Message does not have a query statementId + genie_response = None + logging.error(f"Error calling Genie API wrapper: {e}.") + + if genie_response: + if genie_response["data_table"]: + output_message = ( + f"{genie_response['response']}\n\n{genie_response['data_table']}" + ) + else: + output_message = f"{genie_response['response']}" + else: + output_message = self.agent_config.encountered_error_user_message + + with mlflow.start_span(name="update_message_history") as span: + # message log + # only put the actual query in it + message_log = convert_messages_to_open_ai_format(messages) + # add a fake tool call version of genie so we can debug this in the MLflow UIs + message_log += self.get_faked_tool_calls(message, genie_response) + # add genie's text response + message_log.append({"role": "assistant", "content": output_message}) + span.set_outputs(message_log) + + return { + "content": output_message, + # messages should be returned back to the Review App (or any other front end app) and stored there so it can be passed back to this stateless agent with the next turns of converastion. 
+ "messages": message_log, + } + + @mlflow.trace() + def get_faked_tool_calls(self, user_query, genie_response): + random_unique_id = str(uuid.uuid4().hex) + # openai expects a <=40 character id + tool_call_id = ( + f"call_genie_space_{self.agent_config.genie_space_id}__{random_unique_id}"[ + :40 + ] + ) + args = {"query": user_query} + return [ + { + "role": "assistant", + "tool_calls": [ + { + "id": tool_call_id, + "function": {"arguments": json.dumps(args), "name": "genie"}, + "type": "function", + } + ], + }, + { + "role": "tool", + "tool_call_id": tool_call_id, + "content": json.dumps(genie_response), + }, + ] + + +# tell MLflow logging where to find the agent's code +set_model(GenieAgent()) + +# COMMAND ---------- + +debug = False +if debug: + # mlflow.tracing.disable() + agent = GenieAgent() + + vibe_check_query = { + "messages": [ + { + "role": "user", + # "content": f"What is the churn rate?", + # "content": f"a irrelevant question", + "content": f"what tables you got?", + }, + ] + } + + output = agent.predict(model_input=vibe_check_query) + print(output) diff --git a/openai_sdk_agent_app_sample_code/cookbook/agents/multi_agent_supervisor.py b/openai_sdk_agent_app_sample_code/cookbook/agents/multi_agent_supervisor.py new file mode 100644 index 0000000..37927f2 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/agents/multi_agent_supervisor.py @@ -0,0 +1,616 @@ +import json +import os +from typing import Any, Callable, Dict, List, Optional, Union +from cookbook.config.agents.multi_agent_supervisor import ( + MULTI_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME, +) +import mlflow +from dataclasses import asdict, dataclass, field +import pandas as pd +from mlflow.models import set_model, ModelConfig +from mlflow.models.rag_signatures import StringResponse, ChatCompletionRequest, Message +from databricks.sdk import WorkspaceClient +import os +from cookbook.agents.utils.chat import ( + remove_message_keys_with_null_values, + remove_tool_calls_from_messages, +) +from cookbook.agents.utils.load_config import load_config +from cookbook.config.agents.multi_agent_supervisor import ( + MultiAgentSupervisorConfig, + WORKER_PROMPT_TEMPLATE, + ROUTING_FUNCTION_NAME, + CONVERSATION_HISTORY_THINKING_PARAM, + WORKER_CAPABILITIES_THINKING_PARAM, + NEXT_WORKER_OR_FINISH_PARAM, + FINISH_ROUTE_NAME, + SUPERVISOR_ROUTE_NAME, +) +from cookbook.agents.utils.chat import get_messages_array +from cookbook.agents.utils.playground_parser import ( + convert_messages_to_playground_tool_display_strings, +) +import importlib +import logging + +# logging.basicConfig(level=logging.INFO) + +from mlflow.entities import Trace +import mlflow.deployments + + +AGENT_RAW_OUTPUT_KEY = "raw_agent_output" +AGENT_NEW_MESSAGES_KEY = "new_messages" + + +@dataclass +class SupervisorState: + """Tracks essential conversation state""" + + chat_history: List[Dict[str, str]] = field(default_factory=list) + last_agent_called: str = "" + number_of_supervisor_loops_completed: int = 0 + num_messages_at_start: int = 0 + # error: Optional[str] = None + + @mlflow.trace(span_type="FUNCTION", name="state.append_new_message_to_history") + def append_new_message_to_history(self, message: Dict[str, str]) -> None: + span = mlflow.get_current_active_span() + if span: # TODO: Hack, when mlflow tracing is disabled, span == None. 
+ span.set_inputs({"message": message}) + with mlflow.start_span( + name="remove_message_keys_with_null_values" + ) as span_inner: + span_inner.set_inputs({"message": message}) + message_with_no_null_values_for_keys = remove_message_keys_with_null_values( + message + ) + span_inner.set_outputs( + { + "message_with_no_null_values_for_keys": message_with_no_null_values_for_keys + } + ) + self.chat_history.append(message_with_no_null_values_for_keys) + span.set_outputs(self.chat_history) + + @mlflow.trace(span_type="FUNCTION", name="state.overwrite_chat_history") + def overwrite_chat_history(self, new_chat_history: List[Dict[str, str]]) -> None: + span = mlflow.get_current_active_span() + if span: # TODO: Hack, when mlflow tracing is disabled, span == None. + span.set_inputs( + { + "new_chat_history": new_chat_history, + "current_chat_history": self.chat_history, + } + ) + messages_with_no_null_values_for_keys = [] + with mlflow.start_span( + name="remove_message_keys_with_null_values" + ) as span_inner: + span_inner.set_inputs({"new_chat_history": new_chat_history}) + for message in new_chat_history: + messages_with_no_null_values_for_keys.append( + remove_message_keys_with_null_values(message) + ) + span_inner.set_outputs( + { + "messages_with_no_null_values_for_keys": messages_with_no_null_values_for_keys + } + ) + self.chat_history = messages_with_no_null_values_for_keys.copy() + span.set_outputs(self.chat_history) + + +class MultiAgentSupervisor(mlflow.pyfunc.PythonModel): + """ + Class representing an Agent that does function-calling with tools using OpenAI SDK + """ + + def __init__( + self, agent_config: Optional[Union[MultiAgentSupervisorConfig, str]] = None + ): + logging.info("Initializing MultiAgentSupervisor") + + # load the Agent's configuration. See load_config() for details. + self.agent_config = load_config( + passed_agent_config=agent_config, + default_config_file_name=MULTI_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME, + ) + if not self.agent_config: + raise ValueError( + f"No agent config found. If you are in your local development environment, make sure you either [1] are calling init(agent_config=...) with either an instance of MultiAgentSupervisorConfig or the full path to a YAML config file or [2] have a YAML config file saved at {{your_project_root_folder}}/configs/{MULTI_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME}." 
+ ) + else: + logging.info("Successfully loaded agent config in __init__.") + + # Initialize clients + self._initialize_model_serving_clients() + + # Set up agents and routing + self._initialize_supervised_agents() + + # Set up prompts and tools + self._initialize_supervisor_prompts_and_tools() + + # Initialize state + self.state = None # Will be initialized per conversation + logging.info("Initialized MultiAgentSupervisor") + + def _initialize_model_serving_clients(self): + """Initialize API clients for model serving""" + w = WorkspaceClient() + self.model_serving_client = w.serving_endpoints.get_open_ai_client() + + # used for calling the child agent's deployments + self.mlflow_serving_client = mlflow.deployments.get_deploy_client("databricks") + logging.info("Initialized model serving clients") + + def _initialize_supervised_agents(self): + """Initialize the agent registry and capabilities""" + self.agents = {} + + # Add configured worker agents + if self.agent_config.agent_loading_mode == "model_serving": + # using the model serving endpoints of the agents + for agent in self.agent_config.agents: + self.agents[agent.name] = { + "agent_description": agent.description, + "endpoint_name": agent.endpoint_name, + } + elif self.agent_config.agent_loading_mode == "local": + # using the local agent classes + for agent in self.agent_config.agents: + # load the agent class + module_name, class_name = agent.agent_class_path.rsplit(".", 1) + + module = importlib.import_module(module_name) + # Load the Agent class, which will be a PyFunc + agent_class_obj = getattr(module, class_name) + self.agents[agent.name] = { + "agent_description": agent.description, + "agent_pyfunc_instance": agent_class_obj( + agent_config=agent.agent_config + ), # instantiate the PyFunc + } + logging.info(f"Loaded agent: {agent.name}") + else: + raise ValueError( + f"Invalid agent loading mode: {self.agent_config.agent_loading_mode}" + ) + + def _initialize_supervisor_prompts_and_tools(self): + """Initialize prompts and function calling tools""" + # Create agents string for system prompt + agents_info = [ + WORKER_PROMPT_TEMPLATE.format( + worker_name=key, worker_description=value["agent_description"] + ) + for key, value in self.agents.items() + ] + workers_names_and_descriptions = "".join(agents_info) + + # Update system prompt with template variables + self.supervisor_system_prompt = ( + self.agent_config.supervisor_system_prompt.format( + ROUTING_FUNCTION_NAME=ROUTING_FUNCTION_NAME, + CONVERSATION_HISTORY_THINKING_PARAM=CONVERSATION_HISTORY_THINKING_PARAM, + WORKER_CAPABILITIES_THINKING_PARAM=WORKER_CAPABILITIES_THINKING_PARAM, + NEXT_WORKER_OR_FINISH_PARAM=NEXT_WORKER_OR_FINISH_PARAM, + FINISH_ROUTE_NAME=FINISH_ROUTE_NAME, + workers_names_and_descriptions=workers_names_and_descriptions, + ) + ) + + self.supervisor_user_prompt = self.agent_config.supervisor_user_prompt.format( + worker_names_with_finish=list(self.agents.keys()) + [FINISH_ROUTE_NAME], + NEXT_WORKER_OR_FINISH_PARAM=NEXT_WORKER_OR_FINISH_PARAM, + ROUTING_FUNCTION_NAME=ROUTING_FUNCTION_NAME, + FINISH_ROUTE_NAME=FINISH_ROUTE_NAME, + ) + + # Initialize routing function schema + self.route_function = { + "type": "function", + "function": { + "name": ROUTING_FUNCTION_NAME, + "description": "Route the conversation by providing your thinking and next worker selection.", + "parameters": { + "properties": { + CONVERSATION_HISTORY_THINKING_PARAM: {"type": "string"}, + WORKER_CAPABILITIES_THINKING_PARAM: {"type": "string"}, + NEXT_WORKER_OR_FINISH_PARAM: { + "enum": 
list(self.agents.keys()), + "type": "string", + }, + }, + "required": [ + CONVERSATION_HISTORY_THINKING_PARAM, + WORKER_CAPABILITIES_THINKING_PARAM, + NEXT_WORKER_OR_FINISH_PARAM, + ], + "type": "object", + }, + }, + } + self.tool_json_schemas = [self.route_function] + + @mlflow.trace(span_type="AGENT") + def _get_supervisor_routing_decision(self, messages: List[Dict[str, str]]) -> str: + + supervisor_messages = ( + [{"role": "system", "content": self.supervisor_system_prompt}] + + messages + + [ + { + "role": "user", + "content": self.supervisor_user_prompt, + } + ] + ) + + response = self.chat_completion(messages=supervisor_messages, tools=True) + supervisor_llm_response = response.choices[0].message + supervisor_tool_calls = supervisor_llm_response.tool_calls + + if supervisor_tool_calls: + for tool_call in supervisor_tool_calls: + function = tool_call.function + args = json.loads(function.arguments) + if function.name == ROUTING_FUNCTION_NAME: + return args # includes all keys from the function call + else: + logging.error( + f"Supervisor LLM failed to call the {ROUTING_FUNCTION_NAME}(...) function to determine the next step, so we will default to finishing. It tried to call `{function.name}` with args `{function.arguments}`." + ) + return FINISH_ROUTE_NAME + else: + logging.error( + f"Supervisor LLM failed to choose a tool at all, so we will default to finishing. It said `{supervisor_llm_response}`." + ) + return FINISH_ROUTE_NAME + + @mlflow.trace() + def _call_supervised_agent( + self, agent_name: str, input_messages: List[Dict[str, str]] + ) -> Dict[str, Any]: + """ + Calls a supervised agent and returns ONLY the new [messages] produced by that agent. + """ + span = mlflow.get_current_active_span() + if span: # TODO: Hack, when mlflow tracing is disabled, span == None. 
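+            # Record which loading mode ("local" or "model_serving") handled this call on the
+            # active trace span; mlflow.get_current_active_span() returns None when tracing is disabled.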
+ span.set_attribute( + "self.agent_config.agent_loading_mode", + self.agent_config.agent_loading_mode, + ) + raw_agent_output = {} + if self.agent_config.agent_loading_mode == "model_serving": + endpoint_name = self.agents.get(agent_name).get("endpoint_name") + if endpoint_name: + # this request will grab the mlflow trace from the endpoint + request = { + "databricks_options": {"return_trace": True}, + "messages": input_messages.copy(), + } + completion = self.mlflow_serving_client.predict( + endpoint=endpoint_name, inputs=request + ) + + logging.info(f"Called agent: {agent_name}") + logging.info(f"Got response agent: {completion}") + + # Add the trace from model serving API call to the active trace + if trace := completion.pop("databricks_output", {}).get("trace"): + trace = Trace.from_dict(trace) + mlflow.add_trace(trace) + + raw_agent_output = completion + else: + raise ValueError(f"Invalid agent selected: {agent_name}") + elif self.agent_config.agent_loading_mode == "local": + agent_pyfunc_instance = self.agents.get(agent_name).get( + "agent_pyfunc_instance" + ) + if agent_pyfunc_instance: + request = { + # "databricks_options": {"return_trace": True}, + "messages": input_messages.copy(), + } + raw_agent_output = agent_pyfunc_instance.predict(model_input=request) + else: + raise ValueError(f"Invalid agent selected: {agent_name}") + else: + raise ValueError( + f"Invalid agent loading mode: {self.agent_config.agent_loading_mode}" + ) + + # return only the net new messages produced by the agent + agent_output_messages = raw_agent_output.get("messages", []) + num_messages_previously = len(input_messages) + num_messages_after_agent = len(agent_output_messages) + if ( + num_messages_after_agent == 0 + or num_messages_after_agent == num_messages_previously + ): + raise Exception( + f"Agent {agent_name} either returned no messages at all or returned the same number of messages it received, indicating it did not produce any new messages." + ) + + else: + # Add the Agent's name to its messages + new_messages = agent_output_messages[num_messages_previously:].copy() + for new_message in new_messages: + new_message["name"] = agent_name + return { + # agent's raw output + AGENT_RAW_OUTPUT_KEY: raw_agent_output, + # new messages produced by the agent + AGENT_NEW_MESSAGES_KEY: new_messages, + } + + @mlflow.trace(name="agent", span_type="AGENT") + def predict( + self, + context: Any = None, + model_input: Union[ChatCompletionRequest, Dict, pd.DataFrame] = None, + params: Any = None, + ) -> StringResponse: + # Check here to allow the Agent class to be initialized without a configuration file, which is required to import the class as a module in other files. + if not self.agent_config: + raise RuntimeError("Agent config not loaded. 
Cannot call predict()") + # try: + # Initialize conversation state + messages = get_messages_array(model_input) + self.state = SupervisorState() + self.state.overwrite_chat_history(messages) + self.state.num_messages_at_start = len(messages) + + # Run the supervisor loop up to self.agent_config.max_workers_called times + while ( + self.state.number_of_supervisor_loops_completed + < self.agent_config.max_supervisor_loops + ): + with mlflow.start_span(name="supervisor_loop_iteration") as span: + self.state.number_of_supervisor_loops_completed += 1 + + chat_history_without_tool_calls = remove_tool_calls_from_messages( + self.state.chat_history + ) + routing_function_output = self._get_supervisor_routing_decision( + chat_history_without_tool_calls + ) + + next_agent = routing_function_output.get(NEXT_WORKER_OR_FINISH_PARAM) + span.set_inputs( + { + f"supervisor.{NEXT_WORKER_OR_FINISH_PARAM}": next_agent, + f"supervisor.{CONVERSATION_HISTORY_THINKING_PARAM}": routing_function_output.get( + CONVERSATION_HISTORY_THINKING_PARAM + ), + f"supervisor.{WORKER_CAPABILITIES_THINKING_PARAM}": routing_function_output.get( + WORKER_CAPABILITIES_THINKING_PARAM + ), + "state.number_of_workers_called": self.state.number_of_supervisor_loops_completed, + "state.chat_history": self.state.chat_history, + "chat_history_without_tool_calls": chat_history_without_tool_calls, + } + ) + + if next_agent is None: + logging.error( + f"Supervisor returned no next agent, so we will default to finishing." + ) + span.set_outputs( + { + "post_processed_decision": FINISH_ROUTE_NAME, + "post_processing_reason": "Supervisor returned no next agent, so we will default to finishing.", + "updated_chat_history": self.state.chat_history, + } + ) + break + if next_agent == FINISH_ROUTE_NAME: + logging.info( + f"Supervisor called {FINISH_ROUTE_NAME} after {self.state.number_of_supervisor_loops_completed} workers being called." + ) + span.set_outputs( + { + "post_processed_decision": FINISH_ROUTE_NAME, + "post_processing_reason": "Supervisor selected it.", + "updated_chat_history": self.state.chat_history, + } + ) + break # finish by exiting the while loop + # prevent the supervisor from calling an agent multiple times in a row + elif next_agent != self.state.last_agent_called: + # Call worker agent and update history + try: + agent_output = self._call_supervised_agent( + next_agent, chat_history_without_tool_calls + ) + agent_new_messages = agent_output[AGENT_NEW_MESSAGES_KEY] + agent_raw_output = agent_output[AGENT_RAW_OUTPUT_KEY] + + self.state.overwrite_chat_history( + self.state.chat_history + agent_new_messages + ) + self.state.last_agent_called = next_agent + span.set_outputs( + { + "post_processed_decision": next_agent, + "post_processing_reason": "Supervisor selected it.", + "updated_chat_history": self.state.chat_history, + f"called_agent.{AGENT_NEW_MESSAGES_KEY}": agent_new_messages, + f"called_agent.{AGENT_RAW_OUTPUT_KEY}": agent_raw_output, + } + ) + + except ValueError as e: + logging.error( + f"Error calling agent {next_agent}: {e}. We will default to finishing." + ) + span.set_outputs( + { + "post_processed_decision": FINISH_ROUTE_NAME, + "post_processing_reason": "Supervisor selected an invalid agent, so defaulting to finishing.", + "updated_chat_history": self.state.chat_history, + } + ) + break # finish by exiting the while loop + else: + logging.warning( + f"Supervisor called the same agent {next_agent} twice in a row. We will default to finishing." 
+ ) + span.set_outputs( + { + "post_processed_decision": FINISH_ROUTE_NAME, + "post_processing_reason": f"Supervisor selected {next_agent} twice in a row, so business logic decided to finish instead.", + "updated_chat_history": self.state.chat_history, + } + ) + break # finish by exiting the while loop + + # if the last message is not from the assistant, we need to add a fake assistant message + # TODO: add the name of the supervisor agent here + if self.state.chat_history[-1]["role"] != "assistant": + logging.warning( + "No assistant ended up replying, so we'll add an error response" + ) + with mlflow.start_span(name="add_error_response_to_history") as span: + span.set_inputs( + { + "state.chat_history": self.state.chat_history, + } + ) + self.state.append_new_message_to_history( + { + "role": "assistant", + "content": self.agent_config.supervisor_error_response, + # "name": "supervisor", + } + ) + span.set_outputs( + { + "updated_chat_history": self.state.chat_history, + } + ) + + # Return the resulting conversation back to the user + with mlflow.start_span(name="return_conversation_to_user") as span: + span.set_inputs( + { + "state.chat_history": self.state.chat_history, + "agent_config.playground_debug_mode": self.agent_config.playground_debug_mode, + } + ) + if self.agent_config.playground_debug_mode is True: + return_value = { + "response": ( + self.state.chat_history[-1]["content"] + if self.state.chat_history + else "" + ), + "messages": self.state.chat_history, + # only parse the new messages we added into playground format + "content": convert_messages_to_playground_tool_display_strings( + self.state.chat_history[self.state.num_messages_at_start :] + ), + } + span.set_outputs(return_value) + return return_value + else: + return_value = { + "content": ( + self.state.chat_history[-1]["content"] + if self.state.chat_history + else "" + ), + "messages": self.state.chat_history, + } + span.set_outputs(return_value) + return return_value + + def chat_completion(self, messages: List[Dict[str, str]], tools: bool = False): + endpoint_name = self.agent_config.llm_endpoint_name + llm_options = self.agent_config.llm_parameters.model_dump() + + # # Trace the call to Model Serving - openai versio + traced_create = mlflow.trace( + self.model_serving_client.chat.completions.create, + name="chat_completions_api", + span_type="CHAT_MODEL", + ) + + # Openai - start + if tools: + return traced_create( + model=endpoint_name, + messages=messages, + tools=self.tool_json_schemas, + parallel_tool_calls=False, + **llm_options, + ) + else: + return traced_create(model=endpoint_name, messages=messages, **llm_options) + # Openai - end + + +# tell MLflow logging where to find the agent's code +set_model(MultiAgentSupervisor()) + + +# IMPORTANT: set this to False before logging the model to MLflow +debug = False + +if debug: + + # agent = MultiAgentSupervisor(agent_config=MULTI_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME) + agent = MultiAgentSupervisor() + + vibe_check_query = { + "messages": [ + # {"role": "user", "content": f"how does the CoolTech Elite 5500 work?"}, + {"role": "user", "content": f"calculate the value of 2+2?"}, + # { + # "role": "user", + # "content": f"How does account age affect the likelihood of churn?", + # }, + ] + } + + output = agent.predict(model_input=vibe_check_query) + print(output["content"]) + # print(output) + + # input_2 = output["messages"].copy() + # input_2.append( + # { + # "role": "user", + # "content": f"who is the user most likely to do this?", + # # "content": f"how do i turn 
it on?", + # }, + # ) + + # output_2 = agent.predict(model_input={"messages": input_2}) + # print(output_2["content"]) + +# # COMMAND ---------- + +# if debug: +# agent = MultiAgentSupervisor(agent_config="supervisor_config.yml") +# vibe_check_query = { +# "messages": [ +# # {"role": "user", "content": f"What is agent evaluation?"}, +# # {"role": "user", "content": f"What users have churned?"}, +# { +# "role": "user", +# "content": f"What is the capacity of the BrewMaster Elite 3000 coffee maker?", +# }, +# # {"role": "user", "content": f"calculate the value of 2+2?"}, +# # { +# # "role": "user", +# # "content": f"did user 8e753fa6-2464-4354-887c-a25ace971a7e experience any issues?", +# # }, +# ] +# } + +# output = agent.predict(model_input=vibe_check_query) +# # print(output) diff --git a/openai_sdk_agent_app_sample_code/cookbook/agents/utils/__init__.py b/openai_sdk_agent_app_sample_code/cookbook/agents/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/openai_sdk_agent_app_sample_code/cookbook/agents/utils/chat.py b/openai_sdk_agent_app_sample_code/cookbook/agents/utils/chat.py new file mode 100644 index 0000000..a817c02 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/agents/utils/chat.py @@ -0,0 +1,145 @@ +import mlflow +from typing import Dict, List, Union +from dataclasses import asdict +import pandas as pd +from mlflow.models.rag_signatures import ChatCompletionRequest, Message + + +@mlflow.trace(span_type="PARSER") +def get_messages_array( + model_input: Union[ChatCompletionRequest, Dict, pd.DataFrame] +) -> List[Dict[str, str]]: + if type(model_input) == ChatCompletionRequest: + return model_input.messages + elif type(model_input) == dict: + return model_input.get("messages") + elif type(model_input) == pd.DataFrame: + return model_input.iloc[0].to_dict().get("messages") + + +@mlflow.trace(span_type="PARSER") +def extract_user_query_string(chat_messages_array: List[Dict[str, str]]) -> str: + """ + Extracts user query string from the chat messages array. + + Args: + chat_messages_array: Array of chat messages. + + Returns: + User query string. + """ + + if isinstance(chat_messages_array, pd.Series): + chat_messages_array = chat_messages_array.tolist() + + if isinstance(chat_messages_array[-1], dict): + return chat_messages_array[-1]["content"] + elif isinstance(chat_messages_array[-1], Message): + return chat_messages_array[-1].content + else: + return chat_messages_array[-1] + + +@mlflow.trace(span_type="PARSER") +def extract_chat_history( + chat_messages_array: List[Dict[str, str]] +) -> List[Dict[str, str]]: + """ + Extracts the chat history from the chat messages array. + + Args: + chat_messages_array: Array of chat messages. + + Returns: + The chat history. + """ + # Convert DataFrame to dict + if isinstance(chat_messages_array, pd.Series): + chat_messages_array = chat_messages_array.tolist() + + # Dictionary, return as is + if isinstance(chat_messages_array[0], dict): + return chat_messages_array[:-1] # return all messages except the last one + # MLflow Message, convert to Dictionary + elif isinstance(chat_messages_array[0], Message): + new_array = [] + for message in chat_messages_array[:-1]: + new_array.append(asdict(message)) + return new_array + else: + raise ValueError( + "chat_messages_array is not an Array of Dictionary, Pandas DataFrame, or array of MLflow Message." 
+        )
+
+
+@mlflow.trace(span_type="PARSER")
+def convert_messages_to_open_ai_format(
+    chat_messages_array: List[Dict[str, str]]
+) -> List[Dict[str, str]]:
+    """
+    Converts the chat messages array into a list of OpenAI-format message dictionaries.
+
+    Args:
+        chat_messages_array: Array of chat messages.
+
+    Returns:
+        The messages as a list of OpenAI-format dictionaries.
+    """
+    # Convert DataFrame to dict
+    if isinstance(chat_messages_array, pd.Series):
+        chat_messages_array = chat_messages_array.tolist()
+
+    # Dictionary, return as is
+    if isinstance(chat_messages_array[0], dict):
+        return chat_messages_array  # already a list of dictionaries, return as-is
+    # MLflow Message, convert to Dictionary
+    elif isinstance(chat_messages_array[0], Message):
+        new_array = []
+        for message in chat_messages_array:
+            new_array.append(asdict(message))
+        return new_array
+    else:
+        raise ValueError(
+            "chat_messages_array is not an Array of Dictionary, Pandas DataFrame, or array of MLflow Message."
+        )
+
+
+@mlflow.trace(span_type="PARSER")
+def concat_messages_array_to_string(messages):
+    concatenated_message = "\n".join(
+        [
+            (
+                f"{message.get('role', message.get('name', 'unknown'))}: {message.get('content', '')}"
+                if message.get("role") in ("assistant", "user")
+                else ""
+            )
+            for message in messages
+        ]
+    )
+    return concatenated_message
+
+
+def remove_message_keys_with_null_values(message: Dict[str, str]) -> Dict[str, str]:
+    """
+    Remove any keys with None/null values from the message.
+    Having a null value for a key breaks DBX model serving input validation even if that key is marked as optional in the schema, so we remove them.
+    Example: the `refusal` key is set to None by OpenAI.
+    """
+    return {k: v for k, v in message.items() if v is not None}
+
+
+@mlflow.trace(span_type="PARSER")
+def remove_tool_calls_from_messages(
+    messages: List[Dict[str, str]]
+) -> List[Dict[str, str]]:
+    modified_messages = messages.copy()
+    return [
+        msg
+        for msg in modified_messages
+        if not (
+            msg.get("role") == "tool"  # Remove tool messages
+            or (
+                msg.get("role") == "assistant" and "tool_calls" in msg
+            )  # Remove assistant messages with tool_calls
+        )
+    ]
diff --git a/openai_sdk_agent_app_sample_code/cookbook/agents/utils/execute_function.py b/openai_sdk_agent_app_sample_code/cookbook/agents/utils/execute_function.py
new file mode 100644
index 0000000..1d0a7df
--- /dev/null
+++ b/openai_sdk_agent_app_sample_code/cookbook/agents/utils/execute_function.py
@@ -0,0 +1,8 @@
+import mlflow
+import json
+
+
+@mlflow.trace(span_type="FUNCTION")
+def execute_function(tool, args):
+    result = tool(**args)
+    return json.dumps(result)
diff --git a/openai_sdk_agent_app_sample_code/cookbook/agents/utils/load_config.py b/openai_sdk_agent_app_sample_code/cookbook/agents/utils/load_config.py
new file mode 100644
index 0000000..c924005
--- /dev/null
+++ b/openai_sdk_agent_app_sample_code/cookbook/agents/utils/load_config.py
@@ -0,0 +1,136 @@
+import logging
+from typing import List
+from cookbook.config import SerializableConfig
+import yaml
+import mlflow
+from cookbook.config import (
+    load_serializable_config_from_yaml,
+)
+import os
+
+
+def load_first_yaml_file(config_paths: List[str]) -> str:
+    for path in config_paths:
+        if os.path.exists(path):
+            logging.info(f"Found YAML config file at {path}")
+            with open(path, "r") as handle:
+                return handle.read()
+    raise ValueError(
+        f"No config file found at any of the following paths: {config_paths}. "
+        f"Please ensure a config file exists at one of those paths."
+ ) + + +def load_config_from_mlflow_model_config() -> SerializableConfig: + try: + model_config_as_yaml = yaml.dump(mlflow.models.ModelConfig()._read_config()) + loaded_config = load_serializable_config_from_yaml(model_config_as_yaml) + logging.info(f"Loaded config from mlflow.models.ModelConfig(): {loaded_config}") + return loaded_config + except Exception as e: + logging.info(f"Could not load config from mlflow.models.ModelConfig(): {e}") + return None + + +def try_to_load_config_file(agent_config_file_or_path: str) -> SerializableConfig: + """ + Try to load configuration from a local YAML file. + """ + + # otherwise, we try to look for the YAML file + # this logic accounts for the fact that the agent can be called from any working directory, so we have to search for the config folder to find the YAML. + config_paths = [] + config_paths.append( + agent_config_file_or_path + ) # will try from the passed location first. + + # Then try a from a few common locations - these are set based on the common working directory locations for a notebook/shell. + config_paths.extend( + [ + "./configs/" + agent_config_file_or_path, + "../configs/" + agent_config_file_or_path, + "../../configs/" + agent_config_file_or_path, + "../openai_sdk_agent_app_sample_code/configs/" + agent_config_file_or_path, + "./openai_sdk_agent_app_sample_code/configs/" + agent_config_file_or_path, + ] + ) + + logging.info( + f"Trying to load YAML file {agent_config_file_or_path} from paths: {config_paths}" + ) + try: + config_file = load_first_yaml_file(config_paths) + return load_serializable_config_from_yaml(config_file) + except Exception as e: + logging.info( + f"Exception loading YAML file {agent_config_file_or_path} at {config_paths}: {e}" + ) + raise ValueError( + f"Could not load the provided YAML file {agent_config_file_or_path}." + ) + + +def load_config( + passed_agent_config: SerializableConfig | str | None = None, + default_config_file_name: str = None, +) -> SerializableConfig: + """ + Load configuration from various sources in order of precedence: + # load the Agent's configuration. Priority order: + 1. MLflow Model config + 2. passed_agent_config + 3. default_config_file_name + + Returns: + SerializableModel: Loaded configuration object + """ + + # 1. Try to use MLflow ModelConfig + try: + logging.info("Trying to load config from mlflow.models.ModelConfig()") + model_config_as_yaml = yaml.dump(mlflow.models.ModelConfig()._read_config()) + loaded_config = load_serializable_config_from_yaml(model_config_as_yaml) + logging.info(f"Loaded config from mlflow.models.ModelConfig(): {loaded_config}") + return loaded_config + except FileNotFoundError as e: + logging.info(f"Could not load config from mlflow.models.ModelConfig(): {e}") + + # 2a. passed_agent_config is an instantiated config class, use that + if isinstance(passed_agent_config, SerializableConfig): + logging.info( + "passed_agent_config` is an instantiated config class, using that." + ) + return passed_agent_config + + # 2b. passed_agent_config is a YAML file name or file path, try to load from that YAML file + # try_to_load_config_file logic accounts for the fact that the agent can be called from any working directory, so we will search for the config folder to find the YAML. 
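+    # For example (hypothetical file name): passing "my_agent_config.yaml" would be tried as
+    # "./my_agent_config.yaml" first, then "./configs/my_agent_config.yaml", "../configs/my_agent_config.yaml",
+    # and so on, with the first file that exists being loaded.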
+ if isinstance(passed_agent_config, str): + logging.info( + f"`passed_agent_config` is a string, trying to load from YAML: {passed_agent_config}" + ) + try: + loaded_config = try_to_load_config_file(passed_agent_config) + logging.info( + f"Loaded config from YAML file {passed_agent_config}: {loaded_config}" + ) + return loaded_config + except ValueError as e: + logging.info(f"{passed_agent_config} was not found.") + + # 3. Try to load from default config file + if default_config_file_name: + logging.info(f"Trying to load from YAML: {default_config_file_name}") + try: + loaded_config = try_to_load_config_file(default_config_file_name) + logging.info( + f"Loaded config from YAML file {default_config_file_name}: {loaded_config}" + ) + return loaded_config + except ValueError as e: + logging.info(f"{default_config_file_name} was not found.") + + # If no config is found so far, return None + logging.error( + "load_config could not find a config file. Returning None. Refer to your Agent's error message for next steps." + ) + return None diff --git a/openai_sdk_agent_app_sample_code/cookbook/agents/utils/playground_parser.py b/openai_sdk_agent_app_sample_code/cookbook/agents/utils/playground_parser.py new file mode 100644 index 0000000..20800cc --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/agents/utils/playground_parser.py @@ -0,0 +1,98 @@ +import mlflow +from typing import List, Dict +import json + +## +# Utility functions for formatting OpenAI tool calls and responses for display in Databricks +# playground and review applications. These functions convert the raw message format into +# a more readable, XML-tagged format suitable for UI rendering. +## + + +@mlflow.trace(span_type="PARSER") +def convert_messages_to_playground_tool_display_strings( + messages: List[Dict[str, str]] +) -> str: + """Format a list of OpenAI chat messages for display in Databricks playground/review UI. + + Processes a sequence of OpenAI chat messages, with special handling for tool calls + and their responses. Tool-related content is wrapped in XML-like tags for proper + UI rendering and readability. + + Args: + messages (List[Dict[str, str]]): List of OpenAI message dictionaries containing role + (user/assistant/tool), content, and optional tool_calls from the chat completion API. + + Returns: + str: UI-friendly string with tool calls wrapped in tags and + tool responses wrapped in tags. + """ + output = "" + for msg in messages: # ignore first user input + if msg["role"] == "assistant" and msg.get("tool_calls"): # tool call + for tool_call in msg["tool_calls"]: + output += stringify_tool_call(tool_call) + # output += f"{json.dumps(msg, indent=2)}" + elif msg["role"] == "tool": # tool response + output += stringify_tool_result(msg) + # output += f"{json.dumps(msg, indent=2)}" + else: + output += msg["content"] if msg["content"] != None else "" + return output + + +@mlflow.trace(span_type="PARSER") +def stringify_tool_call(tool_call) -> str: + """Format an OpenAI tool call for display in Databricks playground/review UI. + + Extracts relevant information from an OpenAI tool call and formats it into a + UI-friendly string wrapped in XML-like tags for proper rendering. + + Args: + tool_call (dict): OpenAI tool call dictionary containing function details + (name, arguments) and call ID from the chat completion API. + + Returns: + str: UI-friendly string wrapped in tags, containing the + tool's name, ID, and arguments in a structured format. 
+ """ + try: + function = tool_call["function"] + args_dict = json.loads(function["arguments"]) + request = { + "id": tool_call["id"], + "name": function["name"], + "arguments": json.dumps(args_dict), + } + + return f"{json.dumps(request)}" + + except Exception as e: + print("Failed to stringify tool call: ", e) + return str(tool_call) + + +@mlflow.trace(span_type="PARSER") +def stringify_tool_result(tool_msg) -> str: + """Format an OpenAI tool response for display in Databricks playground/review UI. + + Processes a tool's response message and formats it into a UI-friendly string + wrapped in XML-like tags for proper rendering. + + Args: + tool_msg (dict): OpenAI tool response dictionary containing the tool_call_id + and response content from the chat completion API. + + Returns: + str: UI-friendly string wrapped in tags, containing the + tool's response ID and content. + """ + try: + + result = json.dumps( + {"id": tool_msg["tool_call_id"], "content": tool_msg["content"]} + ) + return f"{result}" + except Exception as e: + print("Failed to stringify tool result:", e) + return str(tool_msg) diff --git a/openai_sdk_agent_app_sample_code/cookbook/agents/utils/signatures.py b/openai_sdk_agent_app_sample_code/cookbook/agents/utils/signatures.py new file mode 100644 index 0000000..4c0f5e7 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/agents/utils/signatures.py @@ -0,0 +1,49 @@ +from mlflow.types.schema import Array, ColSpec, DataType, Map, Object, Property, Schema + +# This is a custom version of the StringResponse class from Databricks Agents +# that includes the `messages` field. +# StringResponse: from mlflow.models.rag_signatures import StringResponse + +STRING_RESPONSE_WITH_MESSAGES = Schema( + [ + ColSpec(name="content", type=DataType.string), + ColSpec( + name="messages", + type=Array( + Object( + [ + Property("role", DataType.string), + Property("content", DataType.string, False), + Property("name", DataType.string, False), + Property("refusal", DataType.string, False), + Property( + "tool_calls", + Array( + Object( + [ + Property("id", DataType.string), + Property( + "function", + Object( + [ + Property("name", DataType.string), + Property( + "arguments", DataType.string + ), + ] + ), + ), + Property("type", DataType.string), + ] + ) + ), + False, + ), + Property("tool_call_id", DataType.string, False), + ] + ), + ), + required=False, + ), + ] +) diff --git a/openai_sdk_agent_app_sample_code/cookbook/config/__init__.py b/openai_sdk_agent_app_sample_code/cookbook/config/__init__.py new file mode 100644 index 0000000..20222c0 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/config/__init__.py @@ -0,0 +1,99 @@ +from typing import Any, Dict, Tuple, Type +import yaml +from pydantic import BaseModel +import importlib +import json + + +def serializable_config_to_yaml(obj: BaseModel) -> str: + data = obj.model_dump() + return yaml.dump(data) + + +# The way serialization works: +# The goal of serialization is to save the class name (e.g., util.xx.xx.configClassName) with the dumped YAML. +# This allows ANY config to be dynamically loaded from a YAML without knowing about the configClassName before OR having it imported in your python env. +# This is necessary for MultiAgent.`agents` and FunctionCallingAgent.`tools` since they can have multiple types of agent or tool configs in them -- when the config is loaded in the serving or local env, we don't know what these configClassName will be ahead of time & we want to avoid importing them all in the python env. 
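+#
+# A minimal round-trip sketch (illustrative values only; GenieAgentConfig is one of the config classes defined elsewhere in this repo):
+#   config = GenieAgentConfig(genie_space_id="my-space-id")
+#   yaml_str = serializable_config_to_yaml(config)            # the dumped YAML includes class_path: cookbook.config.agents.genie_agent.GenieAgentConfig
+#   restored = load_serializable_config_from_yaml(yaml_str)   # re-imports the class from class_path and rebuilds the config
+#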
+# How it works: +# the ONLY way to dump a class is to call model_dump() on it, which will return a dict with the _CLASS_PATH_KEY key containing the class path e.g., util.xx.xx.configClassName +# all other dumping methods (yaml, etc) call model_dump() since it is a Pydantic method +# the ONLY way to load a serialized class is to call load_obj_from_yaml with the YAML string +# load_obj_from_yaml will parse the YAML string and get the class path key +# it will then use that class path key to dynamically load the class from the python path +# it will then call that class's _load_class_from_dict method with the remaining data to let it do anything custom e.g,. load the tools or the agents +# if you haven't overridden _load_class_from_dict, it will call the default implementation of this method from SerializableModel +# otherwise, it will call your overridden _load_class_from_dict method + +# How to use: +# Inherit your config class from SerializableModel +# If you don't have any SerializableModel fields, you can just call load_obj_from_yaml directly on your class's dumped YAML string, nothing else required +# If you have SerializableModel fields, you need to +# 1. Override the _load_class_from_dict method to handle the deserialization of those sub-configs +# 2. Override the model_dump method to call the model_dump of each of those sub-configs properly +# +# Examples +# 1. No sub-configs: GenieAgentConfig, UCTool +# 2. Has sub-configs: FunctionCallingAgentConfig (in `tools`), MultiAgentConfig (in `agents`) +# load_obj_from_yaml --> the only way a class is loaded, will get the class path key + +# TODO: add tests. this was tested manually in a notebook verifying that all classes worked. + + +_CLASS_PATH_KEY = "class_path" + + +class SerializableConfig(BaseModel): + def to_yaml(self) -> str: + return serializable_config_to_yaml(self) + + def model_dump(self, **kwargs) -> Dict[str, Any]: + """Override model_dump to exclude name and description fields. + + Returns: + Dict[str, Any]: Dictionary representation of the model excluding name and description. + """ + model_dumped = super().model_dump(**kwargs) + model_dumped[_CLASS_PATH_KEY] = f"{self.__module__}.{self.__class__.__name__}" + return model_dumped + + @classmethod + def _load_class_from_dict( + cls, class_object, data: Dict[str, Any] + ) -> "SerializableConfig": + return class_object(**data) + + def pretty_print(self): + print(json.dumps(self.model_dump(), indent=2)) + + +def serializable_config_to_yaml_file(obj: BaseModel, yaml_file_path: str) -> None: + with open(yaml_file_path, "w") as handle: + handle.write(serializable_config_to_yaml(obj)) + + +# Helper method used by SerializableModel's with fields containing SerializableModels +def _load_class_from_dict(data: Dict[str, Any]) -> Tuple[Type, Dict[str, Any]]: + """Dynamically load a class from data containing a class path. 
+ + Args: + data: Dictionary containing _CLASS_PATH_KEY and other data + + Returns: + Tuple[Type, Dict[str, Any]]: The class object and the remaining data + """ + class_path = data.pop(_CLASS_PATH_KEY) + + module_name, class_name = class_path.rsplit(".", 1) + module = importlib.import_module(module_name) + return getattr(module, class_name), data + + +def load_serializable_config_from_yaml(yaml_str: str) -> SerializableConfig: + data = yaml.safe_load(yaml_str) + class_obj, remaining_data = _load_class_from_dict(data) + return class_obj._load_class_from_dict(class_obj, remaining_data) + + +def load_serializable_config_from_yaml_file(yaml_file_path: str) -> SerializableConfig: + with open(yaml_file_path, "r") as file: + return load_serializable_config_from_yaml(file.read()) diff --git a/openai_sdk_agent_app_sample_code/cookbook/config/agents/__init__.py b/openai_sdk_agent_app_sample_code/cookbook/config/agents/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/openai_sdk_agent_app_sample_code/cookbook/config/agents/function_calling_agent.py b/openai_sdk_agent_app_sample_code/cookbook/config/agents/function_calling_agent.py new file mode 100644 index 0000000..f536ff6 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/config/agents/function_calling_agent.py @@ -0,0 +1,75 @@ +from typing import List, Any, Dict +from cookbook.config import serializable_config_to_yaml +import yaml +from pydantic import BaseModel +from cookbook.config import ( + load_serializable_config_from_yaml, +) +from cookbook.config.shared.llm import LLMConfig +from cookbook.config import ( + SerializableConfig, +) +from mlflow.models.resources import DatabricksResource, DatabricksServingEndpoint + + +class FunctionCallingAgentConfig(SerializableConfig): + """ + Configuration for the agent with MLflow input example. + + Attributes: + llm_config (LLMConfig): Configuration for the function-calling LLM. + input_example (Any): Used by MLflow to set the Agent's input schema. + tools (List[BaseTool]): List of tools used by the agent. + """ + + tools: List[Any] + llm_config: LLMConfig + # Used by MLflow to set the Agent's input schema + input_example: Any = { + "messages": [ + { + "role": "user", + "content": "What can you help me with?", + }, + ] + } + + # name: str + # description: str + # endpoint_name: str + + def model_dump(self, **kwargs) -> Dict[str, Any]: + """Override model_dump to exclude name and description fields. + + Returns: + Dict[str, Any]: Dictionary representation of the model excluding name and description. 
+ """ + model_dumped = super().model_dump(**kwargs) + model_dumped["tools"] = [ + yaml.safe_load(serializable_config_to_yaml(tool)) for tool in self.tools + ] + return model_dumped + + @classmethod + def _load_class_from_dict( + cls, class_object, data: Dict[str, Any] + ) -> "SerializableConfig": + # Deserialize tools, dynamically reconstructing each tool + tools = [] + for tool_dict in data["tools"]: + tool_yml = yaml.dump(tool_dict) + tools.append(load_serializable_config_from_yaml(tool_yml)) + + # Replace tools with deserialized instances + data["tools"] = tools + return class_object(**data) + + def get_resource_dependencies(self) -> List[DatabricksResource]: + dependencies = [ + DatabricksServingEndpoint(endpoint_name=self.llm_config.llm_endpoint_name), + ] + + # Add the Databricks resources for the retriever's vector indexes + for tool in self.tools: + dependencies.extend(tool.get_resource_dependencies()) + return dependencies diff --git a/openai_sdk_agent_app_sample_code/cookbook/config/agents/genie_agent.py b/openai_sdk_agent_app_sample_code/cookbook/config/agents/genie_agent.py new file mode 100644 index 0000000..42d9c37 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/config/agents/genie_agent.py @@ -0,0 +1,37 @@ +from typing import Any, List +from cookbook.config import SerializableConfig +from mlflow.models.resources import DatabricksResource, DatabricksGenieSpace + + +class GenieAgentConfig(SerializableConfig): + """ + Configuration for the agent with MLflow input example. + + Attributes: + llm_config (FunctionCallingLLMConfig): Configuration for the function-calling LLM. + input_example (Any): Used by MLflow to set the Agent's input schema. + """ + + # TODO: Add validation for the genie_space_id once the API is available. + genie_space_id: str + + # Used by MLflow to set the Agent's input schema + input_example: Any = { + "messages": [ + { + "role": "user", + "content": "What types of data can I query?", + }, + ] + } + + encountered_error_user_message: str = ( + "I encountered an error trying to answer your question, please try again." 
+    )
+
+    # name: str
+    # description: str
+    # endpoint_name: str
+
+    def get_resource_dependencies(self) -> List[DatabricksResource]:
+        return [DatabricksGenieSpace(genie_space_id=self.genie_space_id)]
diff --git a/openai_sdk_agent_app_sample_code/cookbook/config/agents/multi_agent_supervisor.py b/openai_sdk_agent_app_sample_code/cookbook/config/agents/multi_agent_supervisor.py
new file mode 100644
index 0000000..4588b0b
--- /dev/null
+++ b/openai_sdk_agent_app_sample_code/cookbook/config/agents/multi_agent_supervisor.py
@@ -0,0 +1,266 @@
+from cookbook.config import _CLASS_PATH_KEY, serializable_config_to_yaml
+from pydantic import BaseModel, field_validator
+from typing import Any, List, Literal, Dict
+from cookbook.config import (
+    SerializableConfig,
+)
+from cookbook.config.shared.llm import LLMConfig, LLMParametersConfig
+from cookbook.config import (
+    load_serializable_config_from_yaml,
+)
+import yaml
+from mlflow.pyfunc import PythonModel
+from typing import Optional
+
+
+# Design for multi-agent
+
+# requirements
+# * can test locally with just the agent's pyfunc classes
+# * when you change any config, it all just reloads
+
+# when you deploy:
+# * you deploy each supervised agent separately to model serving
+# * then the multi-agent supervisor picks these up
+# * then the multi-agent supervisor deploys
+
+# * each child agent has [name, description, config, code]
+#   - when deployed, it reads it from the UC
+#   - locally, from the config
+
+# Internal implementation details for strings that the LLM sees and may need tuning
+# These constants can be adjusted to improve the quality and reliability of the LLM's responses
+FINISH_ROUTE_NAME = "FINISH"  # reserved name for the finish agent which is hardcoded logic to return the last worker's response to the user
+SUPERVISOR_ROUTE_NAME = "SUPERVISOR"  # reserved name for the supervisor agent which is the main agent that controls the conversation
+ROUTING_FUNCTION_NAME = "decide_next_worker_or_finish"  # function name presented to the supervisor LLM via OpenAI function calling. Used by the supervisor to return its routing decision.
+WORKER_PROMPT_TEMPLATE = "{worker_name}{worker_description}\n"
+# Variable names in the ROUTING_FUNCTION_NAME for the supervisor agent's outputted thinking process and decision making
+CONVERSATION_HISTORY_THINKING_PARAM = "conversation_history_thinking"
+WORKER_CAPABILITIES_THINKING_PARAM = "worker_capabilities_thinking"
+NEXT_WORKER_OR_FINISH_PARAM = "next_worker_or_finish"
+
+MULTI_AGENT_DEFAULT_YAML_CONFIG_FILE_NAME = "multi_agent_supervisor_config.yaml"
+
+
+class MultiAgentSupervisorConfig(SerializableConfig):
+    """
+    Configuration for the multi-agent supervisor.
+
+    Attributes:
+        llm_endpoint_name (str): Databricks Model Serving endpoint name for the supervisor's LLM.
+        llm_parameters (LLMParametersConfig): Parameters controlling LLM response behavior.
+        input_example (Any): Example input used by MLflow to set the model's input schema.
+        playground_debug_mode (bool): When True, outputs debug info to playground UI. Defaults to False.
+        agent_loading_mode (str): Mode for loading supervised agents - "local" or "model_serving".
+        max_supervisor_loops (int): Maximum number of supervisor loop iterations (worker turns) before finishing.
+        supervisor_system_prompt (str): System prompt template for the supervisor agent.
+    """
+
+    llm_endpoint_name: str
+    """
+    Databricks Model Serving endpoint name.
+    This is the LLM used by the supervisor to make decisions.
+    Databricks foundational model endpoints can be found here: https://docs.databricks.com/en/machine-learning/foundation-models/index.html
+    """
+
+    llm_parameters: LLMParametersConfig
+    """
+    Parameters that control how the LLM responds, including temperature and max_tokens.
+    See LLMParametersConfig for details on available parameters.
+    """
+    input_example: Any = {
+        "messages": [
+            {
+                "role": "user",
+                "content": "What can you help me with?",
+            },
+        ]
+    }
+    """
+    Example input used by MLflow to set the Agent's input schema when calling mlflow.pyfunc.log_model().
+    This should match the format of inputs that will be passed to the model's predict() method.
+    For chat agents, this is typically a dictionary containing a 'messages' key with an array of message objects.
+    Example: {'messages': [{'role': 'user', 'content': 'Hello'}]}
+    """
+
+    playground_debug_mode: bool = False
+    """
+    Outputs details of all supervised agents' tool calling to the playground UI by adding it to the agent's response.
+    Turn this off if you don't want end users to see this debugging information, but it is highly recommended to keep it
+    enabled during development and pre-prod so you can visualize the agent's logic in the playground/review app.
+    """
+
+    agent_loading_mode: Literal["local", "model_serving"] = "local"
+    """
+    Mode for loading supervised agents:
+    - local: Supervised agent's code and config are loaded from your local environment. Use this mode during development for faster inner loop testing.
+    - model_serving: Supervised agent is deployed as a Databricks Model Serving endpoint that gets called. Use this mode when deploying the agent to pre-prod/prod environments.
+    """
+
+    @field_validator("max_supervisor_loops")
+    def validate_max_workers(cls, v: int) -> int:
+        if v <= 1:
+            raise ValueError("max_supervisor_loops must be greater than 1")
+        return v
+
+    max_supervisor_loops: int = 5
+    """
+    The maximum number of supervisor loop iterations (worker turns) before the last worker's response is returned to the user by the supervisor's hard-coded logic.
+    Must be greater than 1.
+    """
+
+    supervisor_system_prompt: str = """## Role
+You are a supervisor responsible for managing a conversation between a user and the following workers. You select the next worker to respond or end the conversation to return the last worker's response to the user. Use the `{ROUTING_FUNCTION_NAME}` function to share your step-by-step reasoning and decision.
+
+## Workers
+{workers_names_and_descriptions}
+
+## Objective
+Your goal is to facilitate the conversation and ensure the user receives a helpful response.
+
+## Instructions
+1. **Review the Conversation History**: Think step by step to understand the user's request and the conversation history, which includes previous workers' responses. Output to the `{CONVERSATION_HISTORY_THINKING_PARAM}` variable.
+2. **Assess Worker Descriptions**: Think step by step to consider the description of each worker to understand their capabilities in the context of the conversation history. Output to the `{WORKER_CAPABILITIES_THINKING_PARAM}` variable.
+3. **Select the next worker OR finish the conversation**: Based on the conversation history, the workers' descriptions and your thinking, decide which worker should respond next OR if the conversation should finish with the last worker's response going to the user. Output either the worker's name or "{FINISH_ROUTE_NAME}" to the `{NEXT_WORKER_OR_FINISH_PARAM}` variable.
+
+## Additional Notes
+- A conversation is considered "stuck" if there is no progress or if workers are unable to proceed with their tasks."""
+    """
+    System prompt sent to the supervisor agent before the conversation history to guide its decision-making process.
+    The variable names like {ROUTING_FUNCTION_NAME}, {workers_names_and_descriptions}, etc. will be used by format() in the agent's code to populate the prompt at runtime, so do not change them.
+    Improving quality: You will tune this prompt to improve the supervisor's ability to route the conversation - start with worker descriptions & names, then tune the rest of the prompt.
+    """
+
+    supervisor_user_prompt: str = (
+        """Given the conversation history, the workers' descriptions and your thinking, which worker should act next OR should we FINISH? Respond with one of {worker_names_with_finish} to the `{NEXT_WORKER_OR_FINISH_PARAM}` variable in the `{ROUTING_FUNCTION_NAME}` function."""
+    )
+    """
+    Prompt sent to the supervisor after the system prompt and conversation history to request the next worker selection.
+    The variable names will be populated at runtime via format().
+    """
+
+    supervisor_error_response: str = "I'm sorry, I don't know how to help with that."
+
+    finish_agent_description: str = (
+        "End the conversation, returning the last role='assistant' message to the user."
+    )
+
+    agents: List[Any]
+    """
+    List of supervised agents that will be called by the supervisor agent. Each agent must be an agent that implements the cookbook's Agent configuration interface.
+    """
+
+    @classmethod
+    def _load_class_from_dict(
+        cls, class_object, data: Dict[str, Any]
+    ) -> "SerializableConfig":
+        # Deserialize agents, dynamically reconstructing each agent config
+        agents = []
+        for agent_dict in data["agents"]:
+            agent_yml = yaml.dump(agent_dict)
+            agents.append(load_serializable_config_from_yaml(agent_yml))
+
+        # Replace agents with deserialized instances
+        data["agents"] = agents
+        return class_object(**data)
+
+    def model_dump(self, **kwargs) -> Dict[str, Any]:
+        """Override model_dump to serialize each supervised agent config into a plain dictionary.
+
+        Returns:
+            Dict[str, Any]: Dictionary representation of the model with `agents` serialized.
+        """
+
+        model_dumped = super().model_dump(**kwargs)
+        model_dumped["agents"] = [
+            yaml.safe_load(serializable_config_to_yaml(agent)) for agent in self.agents
+        ]
+        return model_dumped
+
+
+class SupervisedAgentConfig(SerializableConfig):
+    name: str
+    description: str
+    endpoint_name: Optional[str] = None
+    agent_config: Optional[SerializableConfig] = None
+    agent_class_path: Optional[str] = None
+
+    # TODO: check agent_class is a subclass of our Agent - need to refactor Agent to a common base class
+    def __init__(
+        self,
+        name: str,
+        description: str,
+        *,
+        endpoint_name: Optional[str] = None,
+        agent_config: Optional[SerializableConfig] = None,
+        agent_class: Optional[type] = None,
+        agent_class_path: Optional[str] = None,
+    ):
+        """Initialize a SupervisedAgentConfig instance.
+ + Args: + name (str): Name of the supervised agent + description (str): Description of the agent's capabilities + endpoint_name (str): Databricks Model Serving endpoint name + config (Any): Agent's configuration + code (Any): Agent's implementation class + """ + if agent_class is not None and agent_class_path is not None: + raise ValueError( + "Only one of agent_class or agent_class_path can be provided" + ) + + if agent_class is not None: + if not isinstance(agent_class, type): + raise ValueError("agent_class must be an uninstantiated class") + if not issubclass(agent_class, PythonModel): + raise ValueError("agent_class must be a subclass of PythonModel") + + agent_class_path = f"{agent_class.__module__}.{agent_class.__name__}" + + if (endpoint_name is None) and ( + agent_config is None and agent_class_path is None + ): + raise ValueError( + "One of endpoint_name or agent_config/agent_class(_path) must be provided" + ) + + super().__init__( + name=name, + description=description, + endpoint_name=endpoint_name, + agent_config=agent_config, + agent_class_path=agent_class_path, + ) + + def model_dump(self, **kwargs) -> Dict[str, Any]: + """Override model_dump to exclude name and description fields. + + Returns: + Dict[str, Any]: Dictionary representation of the model excluding name and description. + """ + + # only modify the method if agent_config is present, otherwise, this is not needed + if self.agent_config is not None: + kwargs["exclude"] = {"agent_config"}.union(kwargs.get("exclude", set())) + model_dumped = super().model_dump(**kwargs) + model_dumped["agent_config"] = yaml.safe_load( + serializable_config_to_yaml(self.agent_config) + ) + return model_dumped + else: + return super().model_dump(**kwargs) + + @classmethod + def _load_class_from_dict( + cls, class_object, data: Dict[str, Any] + ) -> "SerializableConfig": + + # Deserialize agent config but only if it is present + if data["agent_config"] is not None: + agent_config = load_serializable_config_from_yaml( + yaml.dump(data["agent_config"]) + ) + data["agent_config"] = agent_config + + return class_object(**data) diff --git a/openai_sdk_agent_app_sample_code/cookbook/config/agents/rag_only.py b/openai_sdk_agent_app_sample_code/cookbook/config/agents/rag_only.py new file mode 100644 index 0000000..59fbe29 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/config/agents/rag_only.py @@ -0,0 +1,25 @@ +from cookbook.config.shared.llm import LLMConfig +from cookbook.config.tools.vector_search_tool import VectorSearchRetrieverTool + + +from pydantic import BaseModel + + +from typing import Any + + +class RAGConfig(BaseModel): + """ + Configuration for a RAG chain with MLflow input example. + + Attributes: + llm_config (LLMConfig): Configuration for the function-calling LLM. + vector_search_retriever_config (VectorSearchRetrieverConfig): Configuration for the Databricks vector search + index. + input_example (Any): Used by MLflow to set the RAG chain's input schema. 
+ """ + + vector_search_retriever_config: VectorSearchRetrieverTool + llm_config: LLMConfig + # Used by MLflow to set the Agent's input schema + input_example: Any diff --git a/openai_sdk_agent_app_sample_code/cookbook/config/data_pipeline/__init__.py b/openai_sdk_agent_app_sample_code/cookbook/config/data_pipeline/__init__.py new file mode 100644 index 0000000..f89bde9 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/config/data_pipeline/__init__.py @@ -0,0 +1,49 @@ +from cookbook.config import SerializableConfig, serializable_config_to_yaml +import yaml +from cookbook.config import ( + load_serializable_config_from_yaml, +) +from cookbook.config.data_pipeline.data_pipeline_output import DataPipelineOuputConfig +from cookbook.config.data_pipeline.recursive_text_splitter import ( + RecursiveTextSplitterChunkingConfig, +) +from cookbook.config.data_pipeline.uc_volume_source import UCVolumeSourceConfig + + +from typing import Any, Dict + + +class DataPipelineConfig(SerializableConfig): + source: UCVolumeSourceConfig + output: DataPipelineOuputConfig + chunking_config: RecursiveTextSplitterChunkingConfig + + def model_dump(self, **kwargs) -> Dict[str, Any]: + """Override model_dump to exclude name and description fields. + + Returns: + Dict[str, Any]: Dictionary representation of the model excluding name and description. + """ + model_dumped = super().model_dump(**kwargs) + model_dumped["source"] = yaml.safe_load( + serializable_config_to_yaml(self.source) + ) + model_dumped["output"] = yaml.safe_load( + serializable_config_to_yaml(self.output) + ) + model_dumped["chunking_config"] = yaml.safe_load( + serializable_config_to_yaml(self.chunking_config) + ) + return model_dumped + + @classmethod + def _load_class_from_dict( + cls, class_object, data: Dict[str, Any] + ) -> "SerializableConfig": + # Deserialize sub-configs + data["source"] = load_serializable_config_from_yaml(yaml.dump(data["source"])) + data["output"] = load_serializable_config_from_yaml(yaml.dump(data["output"])) + data["chunking_config"] = load_serializable_config_from_yaml( + yaml.dump(data["chunking_config"]) + ) + return class_object(**data) diff --git a/openai_sdk_agent_app_sample_code/cookbook/config/data_pipeline/data_pipeline_output.py b/openai_sdk_agent_app_sample_code/cookbook/config/data_pipeline/data_pipeline_output.py new file mode 100644 index 0000000..2a2a19b --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/config/data_pipeline/data_pipeline_output.py @@ -0,0 +1,314 @@ +from cookbook.config import SerializableConfig +from typing import Optional + +from databricks.sdk import WorkspaceClient +from databricks.sdk.errors import NotFound +from databricks.sdk.errors.platform import ResourceDoesNotExist +from databricks.sdk.service.vectorsearch import EndpointType + + +class DataPipelineOuputConfig(SerializableConfig): + """Configuration for managing output locations and naming conventions in the data pipeline. + + This class handles the configuration of table names and vector search endpoints for the data pipeline. + It follows a consistent naming pattern for all generated tables and provides version control capabilities. + + Naming Convention: + {catalog}.{schema}.{base_table_name}_{table_postfix}__{version_suffix} + + Generated Tables: + 1. Parsed docs table: Stores the raw parsed documents + 2. Chunked docs table: Stores the documents split into chunks + 3. 
Vector index: Stores the vector embeddings for search + + Args: + uc_catalog_name (str): Unity Catalog name where tables will be created + uc_schema_name (str): Schema name within the catalog + base_table_name (str): Core name used as prefix for all generated tables + docs_table_postfix (str, optional): Suffix for the parsed documents table. Defaults to "docs" + chunked_table_postfix (str, optional): Suffix for the chunked documents table. Defaults to "docs_chunked" + vector_index_postfix (str, optional): Suffix for the vector index. Defaults to "docs_chunked_index" + version_suffix (str, optional): Version identifier (e.g., 'v1', 'test') to maintain multiple pipeline versions + vector_search_endpoint (str): Name of the vector search endpoint to use + + Examples: + With version_suffix="v1": + >>> config = DataPipelineOuputConfig( + ... uc_catalog_name="my_catalog", + ... uc_schema_name="my_schema", + ... base_table_name="agent", + ... version_suffix="v1" + ... ) + # Generated tables: + # - my_catalog.my_schema.agent_docs__v1 + # - my_catalog.my_schema.agent_docs_chunked__v1 + # - my_catalog.my_schema.agent_docs_chunked_index__v1 + + Without version_suffix: + # - my_catalog.my_schema.agent_docs + # - my_catalog.my_schema.agent_docs_chunked + # - my_catalog.my_schema.agent_docs_chunked_index + """ + + vector_search_endpoint: str + parsed_docs_table: str + chunked_docs_table: str + vector_index: str + + def __init__( + self, + *, + vector_search_endpoint: str, + parsed_docs_table: Optional[str] = None, + chunked_docs_table: Optional[str] = None, + vector_index: Optional[str] = None, + uc_catalog_name: Optional[str] = None, + uc_schema_name: Optional[str] = None, + base_table_name: Optional[str] = None, + docs_table_postfix: str = "docs", + chunked_table_postfix: str = "docs_chunked", + vector_index_postfix: str = "docs_chunked_index", + version_suffix: Optional[str] = None, + ): + """Initialize a new DataPipelineOuputConfig instance. + + Supports two initialization styles: + 1. Direct table names: + - parsed_docs_table + - chunked_docs_table + - vector_index + + 2. Generated table names using: + - uc_catalog_name + - uc_schema_name + - base_table_name + - [optional] postfixes and version_suffix + + Args: + vector_search_endpoint (str): Name of the vector search endpoint to use + parsed_docs_table (str, optional): Direct table name for parsed docs + chunked_docs_table (str, optional): Direct table name for chunked docs + vector_index (str, optional): Direct name for vector index + uc_catalog_name (str, optional): Unity Catalog name where tables will be created + uc_schema_name (str, optional): Schema name within the catalog + base_table_name (str, optional): Core name used as prefix for all generated tables + docs_table_postfix (str, optional): Suffix for parsed documents table. Defaults to "docs" + chunked_table_postfix (str, optional): Suffix for chunked documents table. Defaults to "docs_chunked" + vector_index_postfix (str, optional): Suffix for vector index. 
Defaults to "docs_chunked_index" + version_suffix (str, optional): Version identifier for multiple pipeline versions + """ + _validate_not_default(vector_search_endpoint) + + if parsed_docs_table and chunked_docs_table and vector_index: + # Direct table names provided + if any([uc_catalog_name, uc_schema_name, base_table_name]): + raise ValueError( + "Cannot provide both direct table names and table name generation parameters" + ) + elif all([uc_catalog_name, uc_schema_name, base_table_name]): + # Generate table names + _validate_not_default(uc_catalog_name) + _validate_not_default(uc_schema_name) + _validate_not_default(base_table_name) + + parsed_docs_table = _build_table_name( + uc_catalog_name, + uc_schema_name, + base_table_name, + docs_table_postfix, + version_suffix, + ) + chunked_docs_table = _build_table_name( + uc_catalog_name, + uc_schema_name, + base_table_name, + chunked_table_postfix, + version_suffix, + ) + vector_index = _build_table_name( + uc_catalog_name, + uc_schema_name, + base_table_name, + vector_index_postfix, + version_suffix, + escape=False, + ) + else: + raise ValueError( + "Must provide either all direct table names or all table name generation parameters" + ) + + super().__init__( + parsed_docs_table=parsed_docs_table, + chunked_docs_table=chunked_docs_table, + vector_index=vector_index, + vector_search_endpoint=vector_search_endpoint, + ) + + def check_if_vector_search_endpoint_exists(self): + w = WorkspaceClient() + vector_search_endpoints = w.vector_search_endpoints.list_endpoints() + if ( + sum( + [ + self.vector_search_endpoint == ve.name + for ve in vector_search_endpoints + ] + ) + == 0 + ): + return False + else: + return True + + def create_vector_search_endpoint(self): + w = WorkspaceClient() + print( + f"Please wait, creating Vector Search endpoint `{self.vector_search_endpoint}`. This can take up to 20 minutes..." + ) + w.vector_search_endpoints.create_endpoint_and_wait( + self.vector_search_endpoint, endpoint_type=EndpointType.STANDARD + ) + # Make sure vector search endpoint is online and ready. + w.vector_search_endpoints.wait_get_endpoint_vector_search_endpoint_online( + self.vector_search_endpoint + ) + + def create_or_validate_vector_search_endpoint(self): + if not self.check_if_vector_search_endpoint_exists(): + self.create_vector_search_endpoint() + return self.validate_vector_search_endpoint() + + def validate_vector_search_endpoint(self) -> tuple[bool, str]: + """ + Validates that the specified Vector Search endpoint exists + Returns: + tuple[bool, str]: A tuple containing (success, error_message). + If validation passes, returns (True, success_message). If validation fails, returns (False, error_message). + """ + if not self.check_if_vector_search_endpoint_exists(): + msg = f"Vector Search endpoint '{self.vector_search_endpoint}' does not exist. Please either manually create it or call `output_config.create_or_validate_vector_search_endpoint()` to create it." + return (False, msg) + + msg = f"Vector Search endpoint '{self.vector_search_endpoint}' exists." + print(msg) + return (True, msg) + + def validate_catalog_and_schema(self) -> tuple[bool, str]: + """ + Validates that the specified catalog and schema exist + Returns: + tuple[bool, str]: A tuple containing (success, error_message). + If validation passes, returns (True, success_message). If validation fails, returns (False, error_message). 
+ """ + + # Check catalog and schema for parsed_docs_table + parsed_docs_catalog = _get_uc_catalog_name(self.parsed_docs_table) + parsed_docs_schema = _get_uc_schema_name(self.parsed_docs_table) + if not _check_if_catalog_exists(parsed_docs_catalog): + msg = f"Catalog '{parsed_docs_catalog}' does not exist for parsed_docs_table. Please create it first." + return (False, msg) + if not _check_if_schema_exists(parsed_docs_catalog, parsed_docs_schema): + msg = f"Schema '{parsed_docs_schema}' does not exist in catalog '{parsed_docs_catalog}' for parsed_docs_table. Please create it first." + return (False, msg) + + # Check catalog and schema for chunked_docs_table + chunked_docs_catalog = _get_uc_catalog_name(self.chunked_docs_table) + chunked_docs_schema = _get_uc_schema_name(self.chunked_docs_table) + if not _check_if_catalog_exists(chunked_docs_catalog): + msg = f"Catalog '{chunked_docs_catalog}' does not exist for chunked_docs_table. Please create it first." + return (False, msg) + if not _check_if_schema_exists(chunked_docs_catalog, chunked_docs_schema): + msg = f"Schema '{chunked_docs_schema}' does not exist in catalog '{chunked_docs_catalog}' for chunked_docs_table. Please create it first." + return (False, msg) + + # Check catalog and schema for vector_index + vector_index_catalog = _get_uc_catalog_name(self.vector_index) + vector_index_schema = _get_uc_schema_name(self.vector_index) + if not _check_if_catalog_exists(vector_index_catalog): + msg = f"Catalog '{vector_index_catalog}' does not exist for vector_index. Please create it first." + return (False, msg) + if not _check_if_schema_exists(vector_index_catalog, vector_index_schema): + msg = f"Schema '{vector_index_schema}' does not exist in catalog '{vector_index_catalog}' for vector_index. Please create it first." + return (False, msg) + + msg = f"All catalogs and schemas exist for parsed_docs_table, chunked_docs_table, and vector_index." + print(msg) + return (True, msg) + + +def _escape_uc_fqn(uc_fqn: str) -> str: + """ + Escape the fully qualified name (FQN) for a Unity Catalog asset if it contains special characters. + + Args: + uc_fqn (str): The fully qualified name of the asset. + + Returns: + str: The escaped fully qualified name if it contains special characters, otherwise the original FQN. + """ + if "-" in uc_fqn: + parts = uc_fqn.split(".") + escaped_parts = [f"`{part}`" for part in parts] + return ".".join(escaped_parts) + else: + return uc_fqn + + +def _build_table_name( + uc_catalog_name: str, + uc_schema_name: str, + base_table_name: str, + postfix: str, + version_suffix: str = None, + escape: bool = True, +) -> str: + """Helper to build consistent table names + + Args: + postfix: The table name postfix to append + escape: Whether to escape special characters in the table name. Defaults to True. 
+ + Returns: + The constructed table name, optionally escaped + """ + suffix = f"__{version_suffix}" if version_suffix else "" + raw_name = f"{uc_catalog_name}.{uc_schema_name}.{base_table_name}_{postfix}{suffix}" + return _escape_uc_fqn(raw_name) if escape else raw_name + + +def _validate_not_default(value: str) -> str: + if value == "REPLACE_ME": + raise ValueError( + "Please replace the default value 'REPLACE_ME' with your actual configuration" + ) + return value + + +def _get_uc_catalog_name(uc_fqn: str) -> str: + unescaped_uc_fqn = uc_fqn.replace("`", "") + return unescaped_uc_fqn.split(".")[0] + + +def _get_uc_schema_name(uc_fqn: str) -> str: + unescaped_uc_fqn = uc_fqn.replace("`", "") + return unescaped_uc_fqn.split(".")[1] + + +def _check_if_catalog_exists(uc_catalog_name) -> bool: + w = WorkspaceClient() + try: + w.catalogs.get(name=uc_catalog_name) + return True + except (ResourceDoesNotExist, NotFound): + return False + + +def _check_if_schema_exists(uc_catalog_name, uc_schema_name) -> bool: + w = WorkspaceClient() + try: + full_name = f"{uc_catalog_name}.{uc_schema_name}" + w.schemas.get(full_name=full_name) + return True + except (ResourceDoesNotExist, NotFound): + return False diff --git a/openai_sdk_agent_app_sample_code/cookbook/config/data_pipeline/recursive_text_splitter.py b/openai_sdk_agent_app_sample_code/cookbook/config/data_pipeline/recursive_text_splitter.py new file mode 100644 index 0000000..17c15aa --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/config/data_pipeline/recursive_text_splitter.py @@ -0,0 +1,89 @@ +from cookbook.config import SerializableConfig +from cookbook.databricks_utils import ( + get_workspace_hostname, +) +from cookbook.data_pipeline.recursive_character_text_splitter import ( + EMBEDDING_MODELS, + detect_fmapi_embedding_model_type, + validate_chunk_size, +) + + +from databricks.sdk import WorkspaceClient +from databricks.sdk.errors.platform import ResourceDoesNotExist +from databricks.sdk.service.serving import EndpointStateReady + + +class RecursiveTextSplitterChunkingConfig(SerializableConfig): + """ + Configuration for the Unstructured Data Pipeline. + + Args: + embedding_model_endpoint (str): + Embedding model endpoint hosted on Model Serving. Default is `databricks-gte-large`. This can be an External Model, such as OpenAI or a Databricks hosted model on Foundational Model API. The list of Databricks hosted models can be found here: https://docs.databricks.com/en/machine-learning/foundation-models/index.html + chunk_size_tokens (int): + The size of each chunk of the document in tokens. Default is 1024. + chunk_overlap_tokens (int): + The overlap of tokens between chunks. Default is 256. + """ + + embedding_model_endpoint: str = "databricks-gte-large-en" + chunk_size_tokens: int = 1024 + chunk_overlap_tokens: int = 256 + + def validate_embedding_endpoint(self) -> tuple[bool, str]: + """ + Validates that the specified embedding endpoint exists and is of the correct type + Returns: + tuple[bool, str]: A tuple containing (success, error_message). + If validation passes, returns (True, success_message). If validation fails, returns (False, error_message). + """ + task_type = "llm/v1/embeddings" + w = WorkspaceClient() + browser_url = get_workspace_hostname() + try: + llm_endpoint = w.serving_endpoints.get(name=self.embedding_model_endpoint) + except ResourceDoesNotExist as e: + msg = f"Model serving endpoint {self.embedding_model_endpoint} not found." 
+            return (False, msg)
+        if llm_endpoint.state.ready != EndpointStateReady.READY:
+            msg = f"Model serving endpoint {self.embedding_model_endpoint} is not in a READY state. Please visit the status page to debug: {browser_url}/ml/endpoints/{self.embedding_model_endpoint}"
+            return (False, msg)
+        if llm_endpoint.task != task_type:
+            msg = f"Model serving endpoint {self.embedding_model_endpoint} is online & ready, but does not support task type {task_type}. Details at: {browser_url}/ml/endpoints/{self.embedding_model_endpoint}"
+            return (False, msg)
+
+        msg = f"Validated serving endpoint {self.embedding_model_endpoint} as READY and of type {task_type}. View here: {browser_url}/ml/endpoints/{self.embedding_model_endpoint}"
+        print(msg)
+        return (True, msg)
+
+    def validate_chunk_size_and_overlap(self) -> tuple[bool, str]:
+        """
+        Validates that the chunk_size and chunk_overlap values are valid for the configured embedding model.
+        Returns:
+            tuple[bool, str]: A tuple containing (success, error_message).
+                If validation passes, returns (True, success_message). If validation fails, returns (False, error_message).
+        """
+        # Detect the embedding model and its configuration
+        embedding_model_name, chunk_spec = detect_fmapi_embedding_model_type(
+            self.embedding_model_endpoint
+        )
+
+        if chunk_spec is None or embedding_model_name is None:
+            # Fall back to using provided embedding_model_name
+            chunk_spec = EMBEDDING_MODELS.get(embedding_model_name)
+            if chunk_spec is None:
+                msg = f"Embedding model `{embedding_model_name}` not found, so can't validate chunking config. Chunking config must be validated for a specific embedding model. Available models: {EMBEDDING_MODELS.keys()}"
+                return (False, msg)
+
+        # Update chunk specification based on provided parameters.
+        # This must happen after the None check above; otherwise a missing model config would raise a TypeError.
+        chunk_spec["chunk_size_tokens"] = self.chunk_size_tokens
+        chunk_spec["chunk_overlap_tokens"] = self.chunk_overlap_tokens
+
+        # Validate chunk size and overlap
+        is_valid, msg = validate_chunk_size(chunk_spec)
+        if not is_valid:
+            return (False, msg)
+        else:
+            print(msg)
+            return (True, msg)
diff --git a/openai_sdk_agent_app_sample_code/cookbook/config/data_pipeline/uc_volume_source.py b/openai_sdk_agent_app_sample_code/cookbook/config/data_pipeline/uc_volume_source.py
new file mode 100644
index 0000000..f471165
--- /dev/null
+++ b/openai_sdk_agent_app_sample_code/cookbook/config/data_pipeline/uc_volume_source.py
@@ -0,0 +1,132 @@
+from cookbook.config import SerializableConfig
+from cookbook.databricks_utils import get_volume_url
+
+
+from databricks.sdk import WorkspaceClient
+from databricks.sdk.errors import NotFound
+from databricks.sdk.errors.platform import ResourceAlreadyExists, ResourceDoesNotExist
+from databricks.sdk.service.catalog import VolumeType
+from pydantic import Field, computed_field, field_validator
+
+
+class UCVolumeSourceConfig(SerializableConfig):
+    """
+    Source data configuration for the Unstructured Data Pipeline. You can modify this class to add additional configuration settings.
+
+    Args:
+        uc_catalog_name (str):
+            Required. Name of the Unity Catalog.
+        uc_schema_name (str):
+            Required. Name of the Unity Catalog schema.
+        uc_volume_name (str):
+            Required. Name of the Unity Catalog volume.
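+
+    Example (illustrative; the catalog, schema, and volume names below are placeholders):
+        >>> source_config = UCVolumeSourceConfig(
+        ...     uc_catalog_name="main",
+        ...     uc_schema_name="rag_demo",
+        ...     uc_volume_name="raw_docs",
+        ... )
+        >>> source_config.volume_path
+        '/Volumes/main/rag_demo/raw_docs'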
+ """ + + @field_validator("uc_catalog_name", "uc_schema_name", "uc_volume_name") + def validate_not_default(cls, value: str) -> str: + if value == "REPLACE_ME": + raise ValueError( + "Please replace the default value 'REPLACE_ME' with your actual configuration" + ) + return value + + uc_catalog_name: str = Field(..., min_length=1) + uc_schema_name: str = Field(..., min_length=1) + uc_volume_name: str = Field(..., min_length=1) + + @computed_field() + def volume_path(self) -> str: + return f"/Volumes/{self.uc_catalog_name}/{self.uc_schema_name}/{self.uc_volume_name}" + + @computed_field() + def volume_uc_fqn(self) -> str: + return f"{self.uc_catalog_name}.{self.uc_schema_name}.{self.uc_volume_name}" + + def check_if_volume_exists(self) -> bool: + w = WorkspaceClient() + try: + # Use the computed field instead of reconstructing the FQN + w.volumes.read(name=self.volume_uc_fqn) + return True + except (ResourceDoesNotExist, NotFound): + return False + + def create_volume(self): + try: + w = WorkspaceClient() + w.volumes.create( + catalog_name=self.uc_catalog_name, + schema_name=self.uc_schema_name, + name=self.uc_volume_name, + volume_type=VolumeType.MANAGED, + ) + except ResourceAlreadyExists: + pass + + def check_if_catalog_exists(self) -> bool: + w = WorkspaceClient() + try: + w.catalogs.get(name=self.uc_catalog_name) + return True + except (ResourceDoesNotExist, NotFound): + return False + + def check_if_schema_exists(self) -> bool: + w = WorkspaceClient() + try: + full_name = f"{self.uc_catalog_name}.{self.uc_schema_name}" + w.schemas.get(full_name=full_name) + return True + except (ResourceDoesNotExist, NotFound): + return False + + def create_or_validate_volume(self) -> tuple[bool, str]: + """ + Validates that the volume exists and creates it if it doesn't + Returns: + tuple[bool, str]: A tuple containing (success, error_message). + If validation passes, returns (True, success_message). If validation fails, returns (False, error_message). + """ + if not self.check_if_catalog_exists(): + msg = f"Catalog '{self.uc_catalog_name}' does not exist. Please create it first." + return (False, msg) + + if not self.check_if_schema_exists(): + msg = f"Schema '{self.uc_schema_name}' does not exist in catalog '{self.uc_catalog_name}'. Please create it first." + return (False, msg) + + if not self.check_if_volume_exists(): + print(f"Volume {self.volume_path} does not exist. Creating...") + try: + self.create_volume() + except Exception as e: + msg = f"Failed to create volume: {str(e)}" + return (False, msg) + msg = f"Successfully created volume {self.volume_path}. View here: {get_volume_url(self.volume_uc_fqn)}" + print(msg) + return (True, msg) + + msg = f"Volume {self.volume_path} exists. View here: {get_volume_url(self.volume_uc_fqn)}" + print(msg) + return (True, msg) + + def list_files(self) -> list[str]: + """ + Lists all files in the Unity Catalog volume using dbutils.fs. 
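+        For example, for a volume at `/Volumes/main/rag_demo/raw_docs` this might return
+        `['report.pdf', 'faq.html']` (illustrative file names).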
+ + Returns: + list[str]: A list of file paths in the volume + + Raises: + Exception: If the volume doesn't exist or there's an error accessing it + """ + if not self.check_if_volume_exists(): + raise Exception(f"Volume {self.volume_path} does not exist") + + w = WorkspaceClient() + try: + # List contents using dbutils.fs + files = w.dbutils.fs.ls(self.volume_path) + return [file.name for file in files] + except Exception as e: + raise Exception(f"Failed to list files in volume: {str(e)}") diff --git a/openai_sdk_agent_app_sample_code/cookbook/config/shared/__init__.py b/openai_sdk_agent_app_sample_code/cookbook/config/shared/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/openai_sdk_agent_app_sample_code/cookbook/config/shared/agent_storage_location.py b/openai_sdk_agent_app_sample_code/cookbook/config/shared/agent_storage_location.py new file mode 100644 index 0000000..db490b7 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/config/shared/agent_storage_location.py @@ -0,0 +1,118 @@ +from pydantic import ( + field_validator, + FieldValidationInfo, +) +from databricks.sdk import WorkspaceClient +from databricks.sdk.errors.platform import ( + ResourceDoesNotExist, + NotFound, +) +from pydantic import Field +from databricks.sdk import WorkspaceClient +from databricks.sdk.errors.platform import ResourceDoesNotExist +from databricks.sdk.errors import NotFound +from cookbook.config import SerializableConfig +from databricks.sdk import WorkspaceClient + + +class AgentStorageConfig(SerializableConfig): + """ + Source data configuration for the Unstructured Data Pipeline. You can modify this class to add additional configuration settings. + + Args: + uc_model_name (str): + Required. Fully qualified name of the model in format: catalog.schema.model_name + evaluation_set_uc_table (str): + Required. Fully qualified name of the evaluation table in format: catalog.schema.table_name + """ + + uc_model_name: str = Field(..., min_length=1) + evaluation_set_uc_table: str = Field(..., min_length=1) + mlflow_experiment_name: str = Field(None) + + @field_validator("uc_model_name", "evaluation_set_uc_table") + @classmethod + def validate_uc_fqn_format(cls, v: str, info: FieldValidationInfo) -> str: + if v.count(".") != 2: + raise ValueError( + f"{info.field_name} must be in format: catalog.schema.name" + ) + return v + + @classmethod + def escape_uc_fqn(cls, uc_fqn: str) -> str: + """ + Escape the fully qualified name (FQN) for a Unity Catalog asset if it contains special characters. + + Args: + uc_fqn (str): The fully qualified name of the asset. + + Returns: + str: The escaped fully qualified name if it contains special characters, otherwise the original FQN. 
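+
+        Example (illustrative):
+            >>> AgentStorageConfig.escape_uc_fqn("my-catalog.my_schema.my_model")
+            '`my-catalog`.`my_schema`.`my_model`'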
+ """ + if "-" in uc_fqn: + parts = uc_fqn.split(".") + escaped_parts = [f"`{part}`" for part in parts] + return ".".join(escaped_parts) + else: + return uc_fqn + + def check_if_catalog_exists(self, catalog_name: str) -> bool: + w = WorkspaceClient() + try: + w.catalogs.get(name=catalog_name) + return True + except (ResourceDoesNotExist, NotFound): + return False + + def check_if_schema_exists(self, catalog_name: str, schema_name: str) -> bool: + w = WorkspaceClient() + try: + full_name = f"{catalog_name}.{schema_name}" + w.schemas.get(full_name=full_name) + return True + except (ResourceDoesNotExist, NotFound): + return False + + def validate_catalog_and_schema(self) -> tuple[bool, str]: + """ + Validates that the specified catalogs and schemas exist for both uc_model_name and evaluation_set_uc_table + Returns: + tuple[bool, str]: A tuple containing (success, error_message). + If validation passes, returns (True, success_message). If validation fails, returns (False, error_message). + """ + # Extract catalog and schema from uc_model_name + model_catalog, model_schema, _ = self.uc_model_name.split(".") + + # Extract catalog and schema from evaluation_set_uc_table + eval_catalog, eval_schema, _ = self.evaluation_set_uc_table.split(".") + + # Check model catalog and schema + if not self.check_if_catalog_exists(model_catalog): + return ( + False, + f"Model catalog '{model_catalog}' does not exist. Please create it first.", + ) + + if not self.check_if_schema_exists(model_catalog, model_schema): + return ( + False, + f"Model schema '{model_schema}' does not exist in catalog '{model_catalog}'. Please create it first.", + ) + + # Check evaluation table catalog and schema + if not self.check_if_catalog_exists(eval_catalog): + return ( + False, + f"Evaluation catalog '{eval_catalog}' does not exist. Please create it first.", + ) + + if not self.check_if_schema_exists(eval_catalog, eval_schema): + return ( + False, + f"Evaluation schema '{eval_schema}' does not exist in catalog '{eval_catalog}'. Please create it first.", + ) + + msg = f"All catalogs and schemas exist for both model `{self.uc_model_name}` and evaluation table `{self.evaluation_set_uc_table}`." + print(msg) + return (True, msg) diff --git a/openai_sdk_agent_app_sample_code/cookbook/config/shared/llm.py b/openai_sdk_agent_app_sample_code/cookbook/config/shared/llm.py new file mode 100644 index 0000000..9341fd5 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/config/shared/llm.py @@ -0,0 +1,42 @@ +from pydantic import BaseModel + +class LLMParametersConfig(BaseModel): + """ + Configuration for LLM response parameters. + + Attributes: + temperature (float): Controls randomness in the response. + max_tokens (int): Maximum number of tokens in the response. + top_p (float): Controls diversity via nucleus sampling. + top_k (int): Limits the number of highest probability tokens considered. + """ + + # Parameters that control how the LLM responds. + temperature: float = None + max_tokens: int = None + + +class LLMConfig(BaseModel): + """ + Configuration for the function-calling LLM. + + Attributes: + llm_endpoint_name (str): Databricks Model Serving endpoint name. + This is the generator LLM where your LLM queries are sent. + Databricks foundational model endpoints can be found here: + https://docs.databricks.com/en/machine-learning/foundation-models/index.html + llm_system_prompt_template (str): Template for the LLM prompt. + This is how the RAG chain combines the user's question and the retrieved context. 
+ llm_parameters (LLMParametersConfig): Parameters that control how the LLM responds. + """ + + # Databricks Model Serving endpoint name + # This is the generator LLM where your LLM queries are sent. + # Databricks foundational model endpoints can be found here: https://docs.databricks.com/en/machine-learning/foundation-models/index.html + llm_endpoint_name: str + + # Define a template for the LLM prompt. This is how the RAG chain combines the user's question and the retrieved context. + llm_system_prompt_template: str + + # Parameters that control how the LLM responds. + llm_parameters: LLMParametersConfig diff --git a/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/__init__.py b/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/build_retriever_index.py b/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/build_retriever_index.py new file mode 100644 index 0000000..e1e80c4 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/build_retriever_index.py @@ -0,0 +1,123 @@ +from databricks.sdk.service.vectorsearch import ( + VectorSearchIndexesAPI, + DeltaSyncVectorIndexSpecRequest, + EmbeddingSourceColumn, + PipelineType, + VectorIndexType, +) +from databricks.sdk import WorkspaceClient +from databricks.sdk.errors.platform import ResourceDoesNotExist, BadRequest +import time +from cookbook.databricks_utils import get_table_url + + +# %md +# ##### `build_retriever_index` + +# `build_retriever_index` will build the vector search index which is used by our RAG to retrieve relevant documents. + +# Arguments: +# - `chunked_docs_table`: The chunked documents table. There is expected to be a `chunked_text` column, a `chunk_id` column, and a `url` column. +# - `primary_key`: The column to use for the vector index primary key. +# - `embedding_source_column`: The column to compute embeddings for in the vector index. +# - `vector_search_endpoint`: An optional vector search endpoint name. It not defined, defaults to the `{table_id}_vector_search`. +# - `vector_search_index_name`: An optional index name. If not defined, defaults to `{chunked_docs_table}_index`. +# - `embedding_endpoint_name`: An embedding endpoint name. +# - `force_delete_vector_search_endpoint`: Setting this to true will rebuild the vector search endpoint. + + +def build_retriever_index( + vector_search_endpoint: str, + chunked_docs_table_name: str, + vector_search_index_name: str, + embedding_endpoint_name: str, + force_delete_index_before_create=False, + primary_key: str = "chunk_id", # hard coded in the apply_chunking_fn + embedding_source_column: str = "content_chunked", # hard coded in the apply_chunking_fn +) -> tuple[bool, str]: + # Initialize workspace client and vector search API + w = WorkspaceClient() + vsc = w.vector_search_indexes + + def find_index(index_name): + try: + return vsc.get_index(index_name=index_name) + except ResourceDoesNotExist: + return None + + def wait_for_index_to_be_ready(index): + while not index.status.ready: + print( + f"Index {vector_search_index_name} exists, but is not ready, waiting 30 seconds..." + ) + time.sleep(30) + index = find_index(index_name=vector_search_index_name) + + def wait_for_index_to_be_deleted(index): + while index: + print( + f"Waiting for index {vector_search_index_name} to be deleted, waiting 30 seconds..." 
+ ) + time.sleep(30) + index = find_index(index_name=vector_search_index_name) + + existing_index = find_index(index_name=vector_search_index_name) + if existing_index: + print(f"Found existing index {get_table_url(vector_search_index_name)}...") + if force_delete_index_before_create: + print(f"Deleting index {vector_search_index_name}...") + vsc.delete_index(index_name=vector_search_index_name) + wait_for_index_to_be_deleted(existing_index) + create_index = True + else: + wait_for_index_to_be_ready(existing_index) + create_index = False + print( + f"Starting the sync of index {vector_search_index_name}, this can take 15 minutes or much longer if you have a larger number of documents." + ) + # print(existing_index) + try: + vsc.sync_index(index_name=vector_search_index_name) + msg = f"Kicked off index sync for {vector_search_index_name}." + return (False, msg) + except BadRequest as e: + msg = f"Index sync already in progress, so failed to kick off index sync for {vector_search_index_name}. Please wait for the index to finish syncing and try again." + return (True, msg) + else: + print( + f'Creating new vector search index "{vector_search_index_name}" on endpoint "{vector_search_endpoint}"' + ) + create_index = True + + if create_index: + print( + "Computing document embeddings and Vector Search Index. This can take 15 minutes or much longer if you have a larger number of documents." + ) + try: + # Create delta sync index spec using the proper class + delta_sync_spec = DeltaSyncVectorIndexSpecRequest( + source_table=chunked_docs_table_name, + pipeline_type=PipelineType.TRIGGERED, + embedding_source_columns=[ + EmbeddingSourceColumn( + name=embedding_source_column, + embedding_model_endpoint_name=embedding_endpoint_name, + ) + ], + ) + + vsc.create_index( + name=vector_search_index_name, + endpoint_name=vector_search_endpoint, + primary_key=primary_key, + index_type=VectorIndexType.DELTA_SYNC, + delta_sync_index_spec=delta_sync_spec, + ) + msg = ( + f"Successfully created vector search index {vector_search_index_name}." + ) + print(msg) + return (False, msg) + except Exception as e: + msg = f"Vector search index creation failed. Wait 5 minutes and try running this cell again." + return (True, msg) diff --git a/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/chunk_docs.py b/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/chunk_docs.py new file mode 100644 index 0000000..793a721 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/chunk_docs.py @@ -0,0 +1,44 @@ +from typing import Literal, Optional, Any, Callable +from databricks.vector_search.client import VectorSearchClient +from pyspark.sql.functions import explode +import pyspark.sql.functions as func +from typing import Callable +from pyspark.sql.types import StructType, StringType, StructField, MapType, ArrayType +from pyspark.sql import DataFrame, SparkSession + + +def apply_chunking_fn( + parsed_docs_df: DataFrame, + chunking_fn: Callable[[str], list[str]], + propagate_columns: list[str], + doc_column: str = "content", +) -> DataFrame: + # imports here to avoid requiring these libraries in all notebooks since the data pipeline config imports this package + from langchain_text_splitters import RecursiveCharacterTextSplitter + from transformers import AutoTokenizer + import tiktoken + + print( + f"Applying chunking UDF to {parsed_docs_df.count()} documents using Spark - this may take a long time if you have many documents..." 
+ ) + + parser_udf = func.udf( + chunking_fn, returnType=ArrayType(StringType()), useArrow=True + ) + chunked_array_docs = parsed_docs_df.withColumn( + "content_chunked", parser_udf(doc_column) + ) # .drop(doc_column) + chunked_docs = chunked_array_docs.select( + *propagate_columns, explode("content_chunked").alias("content_chunked") + ) + + # Add a primary key: "chunk_id". + chunks_with_ids = chunked_docs.withColumn( + "chunk_id", func.md5(func.col("content_chunked")) + ) + # Reorder for better display. + chunks_with_ids = chunks_with_ids.select( + "chunk_id", "content_chunked", *propagate_columns + ) + + return chunks_with_ids diff --git a/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/default_parser.py b/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/default_parser.py new file mode 100644 index 0000000..277fdc1 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/default_parser.py @@ -0,0 +1,162 @@ +from typing import TypedDict +from datetime import datetime +import warnings +import traceback +import os +from urllib.parse import urlparse + +# PDF libraries +import fitz +import pymupdf4llm + +# HTML libraries +import markdownify +import re + +## DOCX libraries +import pypandoc +import tempfile + +## JSON libraries +import json + + +# Schema of the dict returned by `file_parser(...)` +# This is used to create the output Delta Table's schema. +# Adjust the class if you want to add additional columns from your parser, such as extracting custom metadata. +class ParserReturnValue(TypedDict): + # DO NOT CHANGE THESE NAMES + # Parsed content of the document + content: str # do not change this name + # The status of whether the parser succeeds or fails, used to exclude failed files downstream + parser_status: str # do not change this name + # Unique ID of the document + doc_uri: str # do not change this name + + # OK TO CHANGE THESE NAMES + # Optionally, you can add additional metadata fields here + # example_metadata: str + last_modified: datetime + + +# Parser function. Adjust this function to modify the parsing logic. +def file_parser( + raw_doc_contents_bytes: bytes, + doc_path: str, + modification_time: datetime, + doc_bytes_length: int, +) -> ParserReturnValue: + """ + Parses the content of a PDF document into a string. + + This function takes the raw bytes of a PDF document and its path, attempts to parse the document using PyPDF, + and returns the parsed content and the status of the parsing operation. + + Parameters: + - raw_doc_contents_bytes (bytes): The raw bytes of the document to be parsed (set by Spark when loading the file) + - doc_path (str): The DBFS path of the document, used to verify the file extension (set by Spark when loading the file) + - modification_time (timestamp): The last modification time of the document (set by Spark when loading the file) + - doc_bytes_length (long): The size of the document in bytes (set by Spark when loading the file) + + Returns: + - ParserReturnValue: A dictionary containing the parsed document content and the status of the parsing operation. + The 'contenty will contain the parsed text as a string, and the 'parser_status' key will indicate + whether the parsing was successful or if an error occurred. 
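+
+    Example return value (illustrative):
+        {"content": "# My document ...", "parser_status": "SUCCESS",
+         "doc_uri": "/Volumes/catalog/schema/pdf_docs/filename.pdf",
+         "last_modified": datetime(2024, 1, 1)}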
+ """ + try: + from markdownify import markdownify as md + + filename, file_extension = os.path.splitext(doc_path) + + if file_extension == ".pdf": + pdf_doc = fitz.Document(stream=raw_doc_contents_bytes, filetype="pdf") + md_text = pymupdf4llm.to_markdown(pdf_doc) + + parsed_document = { + "content": md_text.strip(), + "parser_status": "SUCCESS", + } + elif file_extension == ".html": + html_content = raw_doc_contents_bytes.decode("utf-8") + + markdown_contents = md( + str(html_content).strip(), heading_style=markdownify.ATX + ) + markdown_stripped = re.sub(r"\n{3,}", "\n\n", markdown_contents.strip()) + + parsed_document = { + "content": markdown_stripped, + "parser_status": "SUCCESS", + } + elif file_extension == ".docx": + with tempfile.NamedTemporaryFile(delete=True) as temp_file: + temp_file.write(raw_doc_contents_bytes) + temp_file_path = temp_file.name + md = pypandoc.convert_file(temp_file_path, "markdown", format="docx") + + parsed_document = { + "content": md.strip(), + "parser_status": "SUCCESS", + } + elif file_extension in [".txt", ".md"]: + parsed_document = { + "content": raw_doc_contents_bytes.decode("utf-8").strip(), + "parser_status": "SUCCESS", + } + elif file_extension in [".json", ".jsonl"]: + # NOTE: This is a placeholder for a JSON parser. It's not a "real" parser, it just returns the raw JSON formatted into XML-like strings that LLMs tend to like. + json_data = json.loads(raw_doc_contents_bytes.decode("utf-8")) + + def flatten_json_to_xml(obj, parent_key=""): + xml_parts = [] + if isinstance(obj, dict): + for key, value in obj.items(): + if isinstance(value, (dict, list)): + xml_parts.append(flatten_json_to_xml(value, key)) + else: + xml_parts.append(f"<{key}>{str(value)}") + elif isinstance(obj, list): + for i, item in enumerate(obj): + if isinstance(item, (dict, list)): + xml_parts.append( + flatten_json_to_xml(item, f"{parent_key}_{i}") + ) + else: + xml_parts.append( + f"<{parent_key}_{i}>{str(item)}" + ) + else: + xml_parts.append(f"<{parent_key}>{str(obj)}") + return "\n".join(xml_parts) + + flattened_content = flatten_json_to_xml(json_data) + parsed_document = { + "content": flattened_content.strip(), + "parser_status": "SUCCESS", + } + else: + raise Exception(f"No supported parser for {doc_path}") + + # Extract the required doc_uri + # convert from `dbfs:/Volumes/catalog/schema/pdf_docs/filename.pdf` to `/Volumes/catalog/schema/pdf_docs/filename.pdf` + modified_path = urlparse(doc_path).path + parsed_document["doc_uri"] = modified_path + + # Sample metadata extraction logic + # if "test" in parsed_document["content + # parsed_document["example_metadata"] = "test" + # else: + # parsed_document["example_metadata"] = "not test" + + # Add the modified time + parsed_document["last_modified"] = modification_time + + return parsed_document + + except Exception as e: + status = f"An error occurred: {e}\n{traceback.format_exc()}" + warnings.warn(status) + return { + "content": "", + "parser_status": f"ERROR: {status}", + } diff --git a/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/parse_docs.py b/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/parse_docs.py new file mode 100644 index 0000000..182de01 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/parse_docs.py @@ -0,0 +1,159 @@ +import traceback +from datetime import datetime +from typing import Any, Callable, TypedDict, Dict +import os +from IPython.display import display_markdown +import warnings +import pyspark.sql.functions as func +from pyspark.sql.types import 
StructType +from pyspark.sql import DataFrame, SparkSession + + +def _parse_and_extract( + raw_doc_contents_bytes: bytes, + modification_time: datetime, + doc_bytes_length: int, + doc_path: str, + parse_file_udf: Callable[[[dict, Any]], str], +) -> Dict[str, Any]: + """Parses raw bytes & extract metadata.""" + try: + # Run the parser + parser_output_dict = parse_file_udf( + raw_doc_contents_bytes=raw_doc_contents_bytes, + doc_path=doc_path, + modification_time=modification_time, + doc_bytes_length=doc_bytes_length, + ) + + if parser_output_dict.get("parser_status") == "SUCCESS": + return parser_output_dict + else: + raise Exception(parser_output_dict.get("parser_status")) + + except Exception as e: + status = f"An error occurred: {e}\n{traceback.format_exc()}" + warnings.warn(status) + return { + "content": "", + "doc_uri": doc_path, + "parser_status": status, + } + + +def _get_parser_udf( + # extract_metadata_udf: Callable[[[dict, Any]], str], + parse_file_udf: Callable[[[dict, Any]], str], + spark_dataframe_schema: StructType, +): + """Gets the Spark UDF which will parse the files in parallel. + + Arguments: + - extract_metadata_udf: A function that takes parsed content and extracts the metadata + - parse_file_udf: A function that takes the raw file and returns the parsed text. + - spark_dataframe_schema: The resulting schema of the document delta table + """ + # This UDF will load each file, parse the doc, and extract metadata. + parser_udf = func.udf( + lambda raw_doc_contents_bytes, modification_time, doc_bytes_length, doc_path: _parse_and_extract( + raw_doc_contents_bytes, + modification_time, + doc_bytes_length, + doc_path, + parse_file_udf, + ), + returnType=spark_dataframe_schema, + useArrow=True, + ) + return parser_udf + + +def load_files_to_df(spark: SparkSession, source_path: str) -> DataFrame: + """ + Load files from a directory into a Spark DataFrame. + Each row in the DataFrame will contain the path, length, and content of the file; for more + details, see https://spark.apache.org/docs/latest/sql-data-sources-binaryFile.html + """ + + print(f"Loading the raw files from {source_path}...") + # Load the raw riles + raw_files_df = ( + spark.read.format("binaryFile") + .option("recursiveFileLookup", "true") + .load(source_path) + ) + + # Check that files were present and loaded + if raw_files_df.count() == 0: + raise Exception(f"`{source_path}` does not contain any files.") + + # display_markdown( + # f"### Found {raw_files_df.count()} files in {source_path}: ", raw=True + # ) + # raw_files_df.display() + return raw_files_df + + +def apply_parsing_fn( + raw_files_df: DataFrame, + parse_file_fn: Callable[[[dict, Any]], str], + parsed_df_schema: StructType, +) -> DataFrame: + """ + Apply a file-parsing UDF to a DataFrame whose rows correspond to file content/metadata loaded via + https://spark.apache.org/docs/latest/sql-data-sources-binaryFile.html + Returns a DataFrame with the parsed content and metadata. + """ + print( + f"Applying parsing & metadata extraction to {raw_files_df.count()} files using Spark - this may take a long time if you have many documents..." 
+ ) + + parser_udf = _get_parser_udf(parse_file_fn, parsed_df_schema) + + # Run the parsing + parsed_files_staging_df = raw_files_df.withColumn( + "parsing", parser_udf("content", "modificationTime", "length", "path") + ).drop("content") + + # Filter for successfully parsed files + parsed_files_df = parsed_files_staging_df # .filter( + # parsed_files_staging_df.parsing.parser_status == "SUCCESS" + # ) + + # Change the schema to the resulting schema + resulting_fields = [field.name for field in parsed_df_schema.fields] + + parsed_files_df = parsed_files_df.select( + *[func.col(f"parsing.{field}").alias(field) for field in resulting_fields] + ) + return parsed_files_df + + +def check_parsed_df_for_errors(parsed_files_df) -> tuple[bool, str, DataFrame]: + # Check and warn on any errors + errors_df = parsed_files_df.filter(func.col(f"parser_status") != "SUCCESS") + + num_errors = errors_df.count() + if num_errors > 0: + msg = f"{num_errors} documents ({round(errors_df.count()/parsed_files_df.count(), 2)*100}) of documents had parse errors. Please review." + return (True, msg, errors_df) + else: + msg = "All documents were parsed." + print(msg) + return (False, msg, None) + + +def check_parsed_df_for_empty_parsed_files(parsed_files_df): + # Check and warn on any errors + num_empty_df = parsed_files_df.filter( + func.col(f"parser_status") == "SUCCESS" + ).filter(func.col("content") == "") + + num_errors = num_empty_df.count() + if num_errors > 0: + msg = f"{num_errors} documents ({round(num_empty_df.count()/parsed_files_df.count(), 2)*100}) of documents returned empty parsing results. Please review." + return (True, msg, num_empty_df) + else: + msg = "All documents produced non-null parsing results." + print(msg) + return (False, msg, None) diff --git a/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/recursive_character_text_splitter.py b/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/recursive_character_text_splitter.py new file mode 100644 index 0000000..d9f6ed8 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/recursive_character_text_splitter.py @@ -0,0 +1,255 @@ +from typing import Callable, Tuple, Optional +from databricks.sdk import WorkspaceClient +from pydantic import BaseModel + +# %md +# ##### `get_recursive_character_text_splitter` + +# `get_recursive_character_text_splitter` creates a new function that, given an embedding endpoint, returns a callable that can chunk text documents. This utility allows you to write the core business logic of the chunker, without dealing with the details of text splitting. You can decide to write your own, or edit this code if it does not fit your use case. + +# **Arguments:** + +# - `model_serving_endpoint`: The name of the Model Serving endpoint with the embedding model. +# - `embedding_model_name`: The name of the embedding model e.g., `gte-large-en-v1.5`, etc. If `model_serving_endpoint` is an OpenAI External Model or FMAPI model and set to `None`, this will be automatically detected. +# - `chunk_size_tokens`: An optional size for each chunk in tokens. Defaults to `None`, which uses the model's entire context window. +# - `chunk_overlap_tokens`: Tokens that should overlap between chunks. Defaults to `0`. + +# **Returns:** A callable that takes a document (`str`) and produces a list of chunks (`list[str]`). 
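+
+# **Example usage (illustrative; the endpoint name and document text are placeholders):**
+
+# ```
+# chunk_fn = get_recursive_character_text_splitter(
+#     model_serving_endpoint="databricks-gte-large-en",
+#     chunk_size_tokens=1024,
+#     chunk_overlap_tokens=256,
+# )
+# chunks = chunk_fn("A long document that needs to be split into chunks ...")
+# ```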
+ +# Constants +HF_CACHE_DIR = "/tmp/hf_cache/" + +# Embedding Models Configuration +EMBEDDING_MODELS = { + "gte-large-en-v1.5": { + # "tokenizer": lambda: AutoTokenizer.from_pretrained( + # "Alibaba-NLP/gte-large-en-v1.5", cache_dir=HF_CACHE_DIR + # ), + "context_window": 8192, + "type": "SENTENCE_TRANSFORMER", + }, + "bge-large-en-v1.5": { + # "tokenizer": lambda: AutoTokenizer.from_pretrained( + # "BAAI/bge-large-en-v1.5", cache_dir=HF_CACHE_DIR + # ), + "context_window": 512, + "type": "SENTENCE_TRANSFORMER", + }, + "bge_large_en_v1_5": { + # "tokenizer": lambda: AutoTokenizer.from_pretrained( + # "BAAI/bge-large-en-v1.5", cache_dir=HF_CACHE_DIR + # ), + "context_window": 512, + "type": "SENTENCE_TRANSFORMER", + }, + "text-embedding-ada-002": { + "context_window": 8192, + # "tokenizer": lambda: tiktoken.encoding_for_model("text-embedding-ada-002"), + "type": "OPENAI", + }, + "text-embedding-3-small": { + "context_window": 8192, + # "tokenizer": lambda: tiktoken.encoding_for_model("text-embedding-3-small"), + "type": "OPENAI", + }, + "text-embedding-3-large": { + "context_window": 8192, + # "tokenizer": lambda: tiktoken.encoding_for_model("text-embedding-3-large"), + "type": "OPENAI", + }, +} + + +def get_workspace_client() -> WorkspaceClient: + """Returns a WorkspaceClient instance.""" + return WorkspaceClient() + + +# TODO: this is a cheap hack to avoid importing tokenizer libs at the top level - the datapipeline utils are imported by the agent notebook which won't have these libs loaded & we don't want to since autotokenizer is heavy weight. +def get_embedding_model_tokenizer(endpoint_type: str) -> Optional[dict]: + from transformers import AutoTokenizer + import tiktoken + + # copy here to prevent needing to install tokenizer libraries everywhere this is imported + EMBEDDING_MODELS_W_TOKENIZER = { + "gte-large-en-v1.5": { + "tokenizer": lambda: AutoTokenizer.from_pretrained( + "Alibaba-NLP/gte-large-en-v1.5", cache_dir=HF_CACHE_DIR + ), + "context_window": 8192, + "type": "SENTENCE_TRANSFORMER", + }, + "bge-large-en-v1.5": { + "tokenizer": lambda: AutoTokenizer.from_pretrained( + "BAAI/bge-large-en-v1.5", cache_dir=HF_CACHE_DIR + ), + "context_window": 512, + "type": "SENTENCE_TRANSFORMER", + }, + "bge_large_en_v1_5": { + "tokenizer": lambda: AutoTokenizer.from_pretrained( + "BAAI/bge-large-en-v1.5", cache_dir=HF_CACHE_DIR + ), + "context_window": 512, + "type": "SENTENCE_TRANSFORMER", + }, + "text-embedding-ada-002": { + "context_window": 8192, + "tokenizer": lambda: tiktoken.encoding_for_model("text-embedding-ada-002"), + "type": "OPENAI", + }, + "text-embedding-3-small": { + "context_window": 8192, + "tokenizer": lambda: tiktoken.encoding_for_model("text-embedding-3-small"), + "type": "OPENAI", + }, + "text-embedding-3-large": { + "context_window": 8192, + "tokenizer": lambda: tiktoken.encoding_for_model("text-embedding-3-large"), + "type": "OPENAI", + }, + } + return EMBEDDING_MODELS_W_TOKENIZER.get(endpoint_type).get("tokenizer") + + +def get_embedding_model_config(endpoint_type: str) -> Optional[dict]: + """ + Retrieve embedding model configuration by endpoint type. + """ + + return EMBEDDING_MODELS.get(endpoint_type) + + +def extract_endpoint_type(llm_endpoint) -> Optional[str]: + """ + Extract the endpoint type from the given llm_endpoint object. 
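+    For example, for an endpoint serving `text-embedding-3-small` as an External Model, this
+    returns "text-embedding-3-small" (illustrative); returns None if the name cannot be resolved.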
+    """
+    try:
+        return llm_endpoint.config.served_entities[0].external_model.name
+    except AttributeError:
+        try:
+            return llm_endpoint.config.served_entities[0].foundation_model.name
+        except AttributeError:
+            return None
+
+
+def detect_fmapi_embedding_model_type(
+    model_serving_endpoint: str,
+) -> Tuple[Optional[str], Optional[dict]]:
+    """
+    Detects the embedding model type and configuration for the given endpoint.
+    Returns a tuple of (endpoint_type, embedding_config) or (None, None) if not found.
+    """
+    client = get_workspace_client()
+
+    try:
+        llm_endpoint = client.serving_endpoints.get(name=model_serving_endpoint)
+        endpoint_type = extract_endpoint_type(llm_endpoint)
+    except Exception:
+        endpoint_type = None
+
+    embedding_config = (
+        get_embedding_model_config(endpoint_type) if endpoint_type else None
+    )
+
+    # Only attach a tokenizer if a known model config was found; otherwise return (None, None).
+    if embedding_config is not None:
+        embedding_config["tokenizer"] = (
+            get_embedding_model_tokenizer(endpoint_type) if endpoint_type else None
+        )
+
+    return (endpoint_type, embedding_config)
+
+
+def validate_chunk_size(chunk_spec: dict) -> tuple[bool, str]:
+    """
+    Validate the chunk size and overlap settings in chunk_spec.
+    Returns (False, error_message) if any condition is violated, otherwise (True, success_message).
+    """
+    if (
+        chunk_spec["chunk_overlap_tokens"] + chunk_spec["chunk_size_tokens"]
+    ) > chunk_spec["context_window"]:
+        msg = (
+            f'Proposed chunk_size of {chunk_spec["chunk_size_tokens"]} + overlap of {chunk_spec["chunk_overlap_tokens"]} '
+            f'is {chunk_spec["chunk_overlap_tokens"] + chunk_spec["chunk_size_tokens"]} which is greater than the context '
+            f'window of {chunk_spec["context_window"]} tokens.'
+        )
+        return (False, msg)
+    elif chunk_spec["chunk_overlap_tokens"] > chunk_spec["chunk_size_tokens"]:
+        msg = (
+            f'Proposed `chunk_overlap_tokens` of {chunk_spec["chunk_overlap_tokens"]} is greater than the '
+            f'`chunk_size_tokens` of {chunk_spec["chunk_size_tokens"]}. Reduce the size of `chunk_size_tokens`.'
+        )
+        return (False, msg)
+    else:
+        context_usage = (
+            round(
+                (chunk_spec["chunk_size_tokens"] + chunk_spec["chunk_overlap_tokens"])
+                / chunk_spec["context_window"],
+                2,
+            )
+            * 100
+        )
+        msg = f'Chunk size in tokens: {chunk_spec["chunk_size_tokens"]} and chunk overlap in tokens: {chunk_spec["chunk_overlap_tokens"]} are valid. Using {round(context_usage, 2)}% ({chunk_spec["chunk_size_tokens"] + chunk_spec["chunk_overlap_tokens"]} tokens) of the {chunk_spec["context_window"]} token context window.'
+ return (True, msg) + + +def get_recursive_character_text_splitter( + model_serving_endpoint: str, + embedding_model_name: str = None, + chunk_size_tokens: int = None, + chunk_overlap_tokens: int = 0, +) -> Callable[[str], list[str]]: + # imports here to prevent needing to install everywhere + + from langchain_text_splitters import RecursiveCharacterTextSplitter + from transformers import AutoTokenizer + import tiktoken + + try: + # Detect the embedding model and its configuration + embedding_model_name, chunk_spec = detect_fmapi_embedding_model_type( + model_serving_endpoint + ) + + if chunk_spec is None or embedding_model_name is None: + # Fall back to using provided embedding_model_name + chunk_spec = EMBEDDING_MODELS.get(embedding_model_name) + if chunk_spec is None: + raise KeyError + + # Update chunk specification based on provided parameters + chunk_spec["chunk_size_tokens"] = ( + chunk_size_tokens or chunk_spec["context_window"] + ) + chunk_spec["chunk_overlap_tokens"] = chunk_overlap_tokens + + # Validate chunk size and overlap + is_valid, msg = validate_chunk_size(chunk_spec) + if not is_valid: + raise ValueError(msg) + else: + print(msg) + + except KeyError: + raise ValueError( + f"Embedding model `{embedding_model_name}` not found. Available models: {EMBEDDING_MODELS.keys()}" + ) + + def _recursive_character_text_splitter(text: str) -> list[str]: + tokenizer = chunk_spec["tokenizer"]() + if chunk_spec["type"] == "SENTENCE_TRANSFORMER": + splitter = RecursiveCharacterTextSplitter.from_huggingface_tokenizer( + tokenizer, + chunk_size=chunk_spec["chunk_size_tokens"], + chunk_overlap=chunk_spec["chunk_overlap_tokens"], + ) + elif chunk_spec["type"] == "OPENAI": + splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder( + tokenizer.name, + chunk_size=chunk_spec["chunk_size_tokens"], + chunk_overlap=chunk_spec["chunk_overlap_tokens"], + ) + else: + raise ValueError(f"Unsupported model type: {chunk_spec['type']}") + return splitter.split_text(text) + + return _recursive_character_text_splitter diff --git a/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/utils/__init__.py b/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/utils/typed_dicts_to_spark_schema.py b/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/utils/typed_dicts_to_spark_schema.py new file mode 100644 index 0000000..195c16e --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/data_pipeline/utils/typed_dicts_to_spark_schema.py @@ -0,0 +1,103 @@ +from pyspark.sql.types import ( + StructType, + StructField, + StringType, + IntegerType, + DoubleType, + BooleanType, + ArrayType, + TimestampType, + DateType, +) +from typing import TypedDict, get_type_hints, List +from datetime import datetime, date, time + + +def typed_dict_to_spark_fields(typed_dict: type[TypedDict]) -> StructType: + """ + Converts a TypedDict into a list of Spark StructField objects. + + This function maps Python types defined in a TypedDict to their corresponding + Spark SQL data types, facilitating the creation of a Spark DataFrame schema + from Python type annotations. + + Parameters: + - typed_dict (type[TypedDict]): The TypedDict class to be converted. + + Returns: + - StructType: A list of StructField objects representing the Spark schema. + + Raises: + - ValueError: If an unsupported type is encountered or if dictionary types are used. 
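+
+    Example (illustrative):
+        >>> class Doc(TypedDict):
+        ...     doc_uri: str
+        ...     num_pages: int
+        >>> typed_dict_to_spark_fields(Doc)
+        [StructField('doc_uri', StringType(), True), StructField('num_pages', IntegerType(), True)]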
+ """ + + # Mapping of type names to Spark type objects + type_mapping = { + str: StringType(), + int: IntegerType(), + float: DoubleType(), + bool: BooleanType(), + list: ArrayType(StringType()), # Default to StringType for arrays + datetime: TimestampType(), + date: DateType(), + } + + def get_spark_type(value_type): + """ + Helper function to map a Python type to a Spark SQL data type. + + This function supports basic Python types, lists of a single type, and raises + an error for unsupported types or dictionaries. + + Parameters: + - value_type: The Python type to be converted. + + Returns: + - DataType: The corresponding Spark SQL data type. + + Raises: + - ValueError: If the type is unsupported or if dictionary types are used. + """ + if value_type in type_mapping: + return type_mapping[value_type] + elif hasattr(value_type, "__origin__") and value_type.__origin__ == list: + # Handle List[type] types + return ArrayType(get_spark_type(value_type.__args__[0])) + elif hasattr(value_type, "__origin__") and value_type.__origin__ == dict: + # Handle Dict[type, type] types (not fully supported) + raise ValueError("Dict types are not fully supported") + else: + raise ValueError(f"Unsupported type: {value_type}") + + # Get the type hints for the TypedDict + type_hints = get_type_hints(typed_dict) + + # Convert the type hints into a list of StructField objects + fields = [ + StructField(key, get_spark_type(value), True) + for key, value in type_hints.items() + ] + + # Create and return the StructType object + return fields + + +def typed_dicts_to_spark_schema(*typed_dicts: type[TypedDict]) -> StructType: + """ + Converts multiple TypedDicts into a Spark schema. + + This function allows for the combination of multiple TypedDicts into a single + Spark DataFrame schema, enabling the creation of complex data structures. + + Parameters: + - *typed_dicts: Variable number of TypedDict classes to be converted. + + Returns: + - StructType: A Spark schema represented as a StructType object, which is a collection + of StructField objects derived from the provided TypedDicts. + """ + fields = [] + for typed_dict in typed_dicts: + fields.extend(typed_dict_to_spark_fields(typed_dict)) + + return StructType(fields) diff --git a/openai_sdk_agent_app_sample_code/cookbook/databricks_utils/__init__.py b/openai_sdk_agent_app_sample_code/cookbook/databricks_utils/__init__.py new file mode 100644 index 0000000..94fd8fb --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/databricks_utils/__init__.py @@ -0,0 +1,225 @@ +# Helper functions for displaying Delta Table and Volume URLs + +from typing import Optional +import json +import subprocess + +from databricks.sdk import WorkspaceClient +from mlflow.utils import databricks_utils as du + + +def get_databricks_cli_config() -> dict: + """Retrieve the Databricks CLI configuration by running 'databricks auth describe' command. 
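+    The returned dict mirrors the JSON emitted by `databricks auth describe -o json`; for example,
+    `get_workspace_hostname` below reads the workspace host from `config["details"]["host"]`
+    (illustrative of how the output is consumed).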
+ + Returns: + dict: The parsed JSON configuration from the Databricks CLI, or None if an error occurs + + Note: + Requires the Databricks CLI to be installed and configured + """ + try: + # Run databricks auth describe command and capture output + process = subprocess.run( + ["databricks", "auth", "describe", "-o", "json"], + capture_output=True, + text=True, + check=True, # Raises CalledProcessError if command fails + ) + + # Parse JSON output + return json.loads(process.stdout) + except subprocess.CalledProcessError as e: + print(f"Error running databricks CLI command: {e}") + return None + except json.JSONDecodeError as e: + print(f"Error parsing databricks CLI JSON output: {e}") + return None + except Exception as e: + print(f"Unexpected error getting databricks config from CLI: {e}") + return None + + +def get_workspace_hostname() -> str: + """Get the Databricks workspace hostname. + + Returns: + str: The full workspace hostname (e.g., 'https://my-workspace.cloud.databricks.com') + + Raises: + RuntimeError: If not in a Databricks notebook and unable to get workspace hostname from CLI config + """ + if du.is_in_databricks_notebook(): + return "https://" + du.get_browser_hostname() + else: + cli_config = get_databricks_cli_config() + if cli_config is None: + raise RuntimeError("Could not get Databricks CLI config") + try: + return cli_config["details"]["host"] + except KeyError: + raise RuntimeError( + "Could not find workspace hostname in Databricks CLI config" + ) + + +def get_table_url(table_fqdn: str) -> str: + """Generate the URL for a Unity Catalog table in the Databricks UI. + + Args: + table_fqdn: Fully qualified table name in format 'catalog.schema.table'. + Can optionally include backticks around identifiers. + + Returns: + str: The full URL to view the table in the Databricks UI. + + Example: + >>> get_table_url("main.default.my_table") + 'https://my-workspace.cloud.databricks.com/explore/data/main/default/my_table' + """ + table_fqdn = table_fqdn.replace("`", "") + catalog, schema, table = table_fqdn.split(".") + browser_url = get_workspace_hostname() + url = f"{browser_url}/explore/data/{catalog}/{schema}/{table}" + return url + + +def get_volume_url(volume_fqdn: str) -> str: + """Generate the URL for a Unity Catalog volume in the Databricks UI. + + Args: + volume_fqdn: Fully qualified volume name in format 'catalog.schema.volume'. + Can optionally include backticks around identifiers. + + Returns: + str: The full URL to view the volume in the Databricks UI. + + Example: + >>> get_volume_url("main.default.my_volume") + 'https://my-workspace.cloud.databricks.com/explore/data/volumes/main/default/my_volume' + """ + volume_fqdn = volume_fqdn.replace("`", "") + catalog, schema, volume = volume_fqdn.split(".") + browser_url = get_workspace_hostname() + url = f"{browser_url}/explore/data/volumes/{catalog}/{schema}/{volume}" + return url + + +def get_mlflow_experiment_url(experiment_id: str) -> str: + """Generate the URL for an MLflow experiment in the Databricks UI. + + Args: + experiment_id: The ID of the MLflow experiment + + Returns: + str: The full URL to view the MLflow experiment in the Databricks UI. 
+ + Example: + >>> get_mlflow_experiment_url("") + 'https://my-workspace.cloud.databricks.com/ml/experiments/' + """ + browser_url = get_workspace_hostname() + url = f"{browser_url}/ml/experiments/{experiment_id}" + return url + + +def get_mlflow_experiment_traces_url(experiment_id: str) -> str: + """Generate the URL for the MLflow experiment traces in the Databricks UI.""" + return get_mlflow_experiment_url(experiment_id) + "?compareRunsMode=TRACES" + + +def get_function_url(function_fqdn: str) -> str: + """Generate the URL for a Unity Catalog function in the Databricks UI. + + Args: + function_fqdn: Fully qualified function name in format 'catalog.schema.function'. + Can optionally include backticks around identifiers. + + Returns: + str: The full URL to view the function in the Databricks UI. + + Example: + >>> get_function_url("main.default.my_function") + 'https://my-workspace.cloud.databricks.com/explore/data/functions/main/default/my_function' + """ + function_fqdn = function_fqdn.replace("`", "") + catalog, schema, function = function_fqdn.split(".") + browser_url = get_workspace_hostname() + url = f"{browser_url}/explore/data/functions/{catalog}/{schema}/{function}" + return url + + +def get_cluster_url(cluster_id: str) -> str: + """Generate the URL for a Databricks cluster in the Databricks UI. + + Args: + cluster_id: The ID of the cluster + + Returns: + str: The full URL to view the cluster in the Databricks UI. + + Example: + >>> get_cluster_url("") + 'https://my-workspace.cloud.databricks.com/compute/clusters/' + """ + browser_url = get_workspace_hostname() + url = f"{browser_url}/compute/clusters/{cluster_id}" + return url + + +def get_active_cluster_id_from_databricks_auth() -> Optional[str]: + """Get the active cluster ID from the Databricks CLI authentication configuration. + + Returns: + Optional[str]: The active cluster ID if found, None if not found or if an error occurs + + Note: + This function relies on the Databricks CLI configuration having a cluster_id set + """ + if du.is_in_databricks_notebook(): + raise ValueError( + "Cannot get active cluster ID from the Databricks CLI in a Databricks notebook" + ) + try: + # Get config from the databricks cli + auth_output = get_databricks_cli_config() + + # Safely navigate nested dict + details = auth_output.get("details", {}) + config = details.get("configuration", {}) + cluster = config.get("cluster_id", {}) + cluster_id = cluster.get("value") + + if cluster_id is None: + raise ValueError("Could not find cluster_id in Databricks auth config") + + return cluster_id + + except Exception as e: + print(f"Unexpected error: {e}") + return None + + +def get_active_cluster_id() -> Optional[str]: + """Get the active cluster ID. 
+ + Returns: + Optional[str]: The active cluster ID if found, None if not found or if an error occurs + """ + if du.is_in_databricks_notebook(): + return du.get_active_cluster_id() + else: + return get_active_cluster_id_from_databricks_auth() + + +def get_current_user_info(spark) -> tuple[str, str, str]: + # Get current user's name & email + w = WorkspaceClient() + user_email = w.current_user.me().user_name + user_name = user_email.split("@")[0].replace(".", "_") + + # Get the workspace default UC catalog + default_catalog = spark.sql("select current_catalog() as cur_catalog").collect()[0][ + "cur_catalog" + ] + + return user_email, user_name, default_catalog diff --git a/openai_sdk_agent_app_sample_code/cookbook/databricks_utils/agent_evaluation/__init__.py b/openai_sdk_agent_app_sample_code/cookbook/databricks_utils/agent_evaluation/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/openai_sdk_agent_app_sample_code/cookbook/databricks_utils/agent_evaluation/evaluation_set.py b/openai_sdk_agent_app_sample_code/cookbook/databricks_utils/agent_evaluation/evaluation_set.py new file mode 100644 index 0000000..6cd2e84 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/databricks_utils/agent_evaluation/evaluation_set.py @@ -0,0 +1,236 @@ +from typing import List, Mapping, Optional + +import mlflow.entities as mlflow_entities + +from pyspark import sql +from pyspark.sql import functions as F, types as T +from pyspark.sql.window import Window + +from databricks.rag_eval.evaluation import traces + +# Deduplicate the assessment log + +# By default, the assessment log contains one row for every action/click the user does in the Review App. This code translates these logs into a single row for each request. + +_REQUEST_ID = "request_id" +_TIMESTAMP = "timestamp" +_ROW_NUMBER = "row_number" +_SOURCE = "source" +_SOURCE_ID = "source.id" +_STEP_ID = "step_id" +_TEXT_ASSESSMENT = "text_assessment" +_RETRIEVAL_ASSESSMENT = "retrieval_assessment" + + +def _dedup_by_assessment_window( + assessment_log_df: sql.DataFrame, window: Window +) -> sql.DataFrame: + """ + Dedup the assessment logs by taking the first row from each group, defined by the window + :param assessment_log_df: Pyspark DataFrame of the assessment logs + :param window: Pyspark window to group assessments by + :return: Pyspark DataFrame of the deduped assessment logs + """ + return ( + assessment_log_df.withColumn(_ROW_NUMBER, F.row_number().over(window)) + .filter(F.col(_ROW_NUMBER) == 1) + .drop(_ROW_NUMBER) + ) + + +def _dedup_assessment_log(assessment_log_df: sql.DataFrame) -> sql.DataFrame: + """ + Dedup the assessment logs to get the latest assessments. 
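+    An assessment may be logged multiple times for the same request (one row per reviewer click);
+    only the most recent row per request_id / source.id pair is kept.
+
+    Example (illustrative sketch; the table name is hypothetical):
+        assessment_log_df = spark.table("catalog.schema.agent_assessment_logs")
+        deduped_df = _dedup_assessment_log(assessment_log_df)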
+ :param assessment_log_df: Pyspark DataFrame of the assessment logs + :return: Pyspark DataFrame of the deduped assessment logs + """ + # Dedup the text assessments + text_assessment_window = Window.partitionBy(_REQUEST_ID, _SOURCE_ID).orderBy( + F.col(_TIMESTAMP).desc() + ) + deduped_text_assessment_df = _dedup_by_assessment_window( + # Filter rows with null text assessments + assessment_log_df.filter(F.col(_TEXT_ASSESSMENT).isNotNull()), + text_assessment_window, + ) + + # Dedup the retrieval assessments + retrieval_assessment_window = Window.partitionBy( + _REQUEST_ID, + _SOURCE_ID, + f"{_RETRIEVAL_ASSESSMENT}.position", + f"{_RETRIEVAL_ASSESSMENT}.{_STEP_ID}", + ).orderBy(F.col(_TIMESTAMP).desc()) + deduped_retrieval_assessment_df = _dedup_by_assessment_window( + # Filter rows with null retrieval assessments + assessment_log_df.filter(F.col(_RETRIEVAL_ASSESSMENT).isNotNull()), + retrieval_assessment_window, + ) + + # Collect retrieval assessments from the same request/step/source into a single list + nested_retrieval_assessment_df = ( + deduped_retrieval_assessment_df.groupBy(_REQUEST_ID, _SOURCE_ID, _STEP_ID).agg( + F.any_value(_TIMESTAMP).alias(_TIMESTAMP), + F.any_value(_SOURCE).alias(_SOURCE), + F.collect_list(_RETRIEVAL_ASSESSMENT).alias("retrieval_assessments"), + ) + # Drop the old retrieval assessment, source id, and text assessment columns + .drop(_RETRIEVAL_ASSESSMENT, "id", _TEXT_ASSESSMENT) + ) + + # Join the deduped text assessments with the nested deduped retrieval assessments + deduped_assessment_log_df = deduped_text_assessment_df.alias("a").join( + nested_retrieval_assessment_df.alias("b"), + (F.col(f"a.{_REQUEST_ID}") == F.col(f"b.{_REQUEST_ID}")) + & (F.col(f"a.{_SOURCE_ID}") == F.col(f"b.{_SOURCE_ID}")), + "full_outer", + ) + + # Coalesce columns from both dataframes in case a request does not have either assessment + return deduped_assessment_log_df.select( + F.coalesce(F.col(f"a.{_REQUEST_ID}"), F.col(f"b.{_REQUEST_ID}")).alias( + _REQUEST_ID + ), + F.coalesce(F.col(f"a.{_STEP_ID}"), F.col(f"b.{_STEP_ID}")).alias(_STEP_ID), + F.coalesce(F.col(f"a.{_TIMESTAMP}"), F.col(f"b.{_TIMESTAMP}")).alias( + _TIMESTAMP + ), + F.coalesce(F.col(f"a.{_SOURCE}"), F.col(f"b.{_SOURCE}")).alias(_SOURCE), + F.col(f"a.{_TEXT_ASSESSMENT}").alias(_TEXT_ASSESSMENT), + F.col("b.retrieval_assessments").alias(_RETRIEVAL_ASSESSMENT), + # F.col("schema_version") + ) + + ## Attach ground truth + + +def attach_ground_truth(request_log_df, deduped_assessment_log_df): + suggested_output_col = F.col(f"{_TEXT_ASSESSMENT}.suggested_output") + is_correct_col = F.col(f"{_TEXT_ASSESSMENT}.ratings.answer_correct.value") + # Extract out the thumbs up/down rating and the suggested output + rating_log_df = ( + deduped_assessment_log_df.withColumn("is_correct", is_correct_col) + .withColumn( + "suggested_output", + F.when(suggested_output_col == "", None).otherwise(suggested_output_col), + ) + .withColumn("source_user", F.col("source.id")) + .select( + "request_id", + "is_correct", + "suggested_output", + "source_user", + _RETRIEVAL_ASSESSMENT, + ) + ) + # Join the request log with the ratings from above + raw_requests_with_feedback_df = request_log_df.join( + rating_log_df, + request_log_df.databricks_request_id == rating_log_df.request_id, + "left", + ) + + raw_requests_with_feedback_df = raw_requests_with_feedback_df.drop("request_id") + return raw_requests_with_feedback_df + +_EXPECTED_RETRIEVAL_CONTEXT_SCHEMA = T.ArrayType( + T.StructType( + [ + T.StructField("doc_uri", T.StringType()), + 
T.StructField("content", T.StringType()), + ] + ) +) + + +def extract_retrieved_chunks_from_trace(trace_str: str) -> List[Mapping[str, str]]: + """Helper to extract the retrieved chunks from a trace string""" + trace = mlflow_entities.Trace.from_json(trace_str) + chunks = traces.extract_retrieval_context_from_trace(trace) + return [{"doc_uri": chunk.doc_uri, "content": chunk.content} for chunk in chunks] + + +@F.udf(_EXPECTED_RETRIEVAL_CONTEXT_SCHEMA) +def construct_expected_retrieval_context( + trace_str: Optional[str], chunk_at_i_relevance: Optional[List[str]] +) -> Optional[List[Mapping[str, str]]]: + """Helper to construct the expected retrieval context. Any retrieved chunks that are not relevant are dropped.""" + if chunk_at_i_relevance is None or trace_str is None: + return None + retrieved_chunks = extract_retrieved_chunks_from_trace(trace_str) + expected_retrieval_context = [ + chunk + for chunk, rating in zip(retrieved_chunks, chunk_at_i_relevance) + if rating == "true" + ] + return expected_retrieval_context if len(expected_retrieval_context) else None + + +# ================================= + + +def identify_potential_eval_set_records(raw_requests_with_feedback_df): + # For thumbs up, use either the suggested output or the response, in that order + positive_feedback_df = ( + raw_requests_with_feedback_df.where(F.col("is_correct") == F.lit("positive")) + .withColumn( + "expected_response", + F.when( + F.col("suggested_output") != None, F.col("suggested_output") + ).otherwise(F.col("response")), + ) + .withColumn("source_tag", F.lit("thumbs_up")) + ) + + # For thumbs down, use the suggested output if there is one + negative_feedback_df = ( + raw_requests_with_feedback_df.where(F.col("is_correct") == F.lit("negative")) + .withColumn("expected_response", F.col("suggested_output")) + .withColumn("source_tag", F.lit("thumbs_down_edited")) + ) + + # For no feedback or IDK, there is no expected response. 
+ no_or_unknown_feedback_df = ( + raw_requests_with_feedback_df.where( + (F.col("is_correct").isNull()) + | ( + (F.col("is_correct") != F.lit("negative")) + & (F.col("is_correct") != F.lit("positive")) + ) + ) + .withColumn("expected_response", F.lit(None)) + .withColumn("source_tag", F.lit("no_feedback_provided")) + ) + # Join the above feedback tables and select the relevant columns for the eval harness + requests_with_feedback_df = positive_feedback_df.unionByName( + negative_feedback_df + ).unionByName(no_or_unknown_feedback_df) + # Get the thumbs up/down for each retrieved chunk + requests_with_feedback_df = requests_with_feedback_df.withColumn( + "chunk_at_i_relevance", + F.transform( + F.col(_RETRIEVAL_ASSESSMENT), lambda x: x.ratings.answer_correct.value + ), + ).drop(_RETRIEVAL_ASSESSMENT) + + requests_with_feedback_df = requests_with_feedback_df.withColumnRenamed( + "databricks_request_id", "request_id" + ) + + # Add the expected retrieved context column + requests_with_feedback_df = requests_with_feedback_df.withColumn( + "expected_retrieved_context", + construct_expected_retrieval_context( + F.col("trace"), F.col("chunk_at_i_relevance") + ), + ) + return requests_with_feedback_df + +def create_potential_evaluation_set(request_log_df, assessment_log_df): + raw_requests_with_feedback_df = attach_ground_truth( + request_log_df, assessment_log_df + ) + requests_with_feedback_df = identify_potential_eval_set_records( + raw_requests_with_feedback_df + ) + return requests_with_feedback_df \ No newline at end of file diff --git a/openai_sdk_agent_app_sample_code/cookbook/databricks_utils/agent_framework/__init__.py b/openai_sdk_agent_app_sample_code/cookbook/databricks_utils/agent_framework/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/openai_sdk_agent_app_sample_code/cookbook/databricks_utils/agent_framework/get_inference_tables.py b/openai_sdk_agent_app_sample_code/cookbook/databricks_utils/agent_framework/get_inference_tables.py new file mode 100644 index 0000000..1d1183c --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/databricks_utils/agent_framework/get_inference_tables.py @@ -0,0 +1,35 @@ +from databricks.sdk import WorkspaceClient +from databricks import agents + +def get_inference_tables(uc_model_fqn): + w = WorkspaceClient() + + deployment = agents.get_deployments(uc_model_fqn) + if len(deployment) == 0: + raise ValueError(f"No deployments found for model {uc_model_fqn}") + endpoint = w.serving_endpoints.get(deployment[0].endpoint_name) + + + try: + endpoint_config = endpoint.config.auto_capture_config + except AttributeError as e: + endpoint_config = endpoint.pending_config.auto_capture_config + + inference_table_name = endpoint_config.state.payload_table.name + inference_table_catalog = endpoint_config.catalog_name + inference_table_schema = endpoint_config.schema_name + + # Cleanly formatted tables + assessment_log_table_name = f"{inference_table_name}_assessment_logs" + request_log_table_name = f"{inference_table_name}_request_logs" + + return { + 'uc_catalog_name': inference_table_catalog, + 'uc_schema_name': inference_table_schema, + 'table_names': { + 'raw_payload_logs': inference_table_name, + 'assessment_logs': assessment_log_table_name, + 'request_logs': request_log_table_name, + } + + } diff --git a/openai_sdk_agent_app_sample_code/cookbook/databricks_utils/install_cluster_library.py b/openai_sdk_agent_app_sample_code/cookbook/databricks_utils/install_cluster_library.py new file mode 100644 index 0000000..e7a0074 --- /dev/null 
+++ b/openai_sdk_agent_app_sample_code/cookbook/databricks_utils/install_cluster_library.py @@ -0,0 +1,107 @@ +from typing import List + +from databricks.sdk import WorkspaceClient +from databricks.sdk.service.compute import ( + Library, + LibraryFullStatus, + LibraryInstallStatus, + PythonPyPiLibrary, +) +import time + + +def parse_requirements(requirements_path: str) -> List[str]: + """Parse requirements.txt file and return list of package specifications.""" + packages = [] + with open(requirements_path) as f: + for line in f: + line = line.strip() + if line and not line.startswith("#"): + packages.append(line) + return packages + + +def wait_for_library_installation( + w: WorkspaceClient, cluster_id: str, timeout_minutes: int = 20 +): + """Wait for all libraries to be installed or fail.""" + start_time = time.time() + timeout_seconds = timeout_minutes * 60 + final_states = { + LibraryInstallStatus.INSTALLED, + LibraryInstallStatus.FAILED, + LibraryInstallStatus.SKIPPED, + } + + while True: + if time.time() - start_time > timeout_seconds: + print( + f"Timeout after {timeout_minutes} minutes waiting for library installation" + ) + break + + status: List[LibraryFullStatus] = w.libraries.cluster_status(cluster_id) + all_finished = True + + for lib in status: + if lib.status not in final_states: + all_finished = False + break + + if all_finished: + break + + print("Installation in progress, waiting 15 seconds...") + time.sleep(15) # Check every 15 seconds + + # Print final status + status = w.libraries.cluster_status(cluster_id) + for lib in status: + if lib.library.pypi: + status_msg = ( + f"Package: {lib.library.pypi.package} - Status: {lib.status.value}" + ) + if lib.messages: + status_msg += f" - Messages: {', '.join(lib.messages)}" + print(status_msg) + + +def install_requirements(cluster_id: str, requirements_path: str): + """Install all packages from requirements.txt into specified cluster.""" + # Initialize workspace client + w = WorkspaceClient() + + # Parse requirements file + packages = parse_requirements(requirements_path) + + # Get current library status + current_status = w.libraries.cluster_status(cluster_id) + existing_packages = { + lib.library.pypi.package: lib.status.value + for lib in current_status + if lib.library.pypi + } + + # Filter out already installed packages + libraries = [] + for package in packages: + if ( + package not in existing_packages + or existing_packages[package] != LibraryInstallStatus.INSTALLED.value + ): + libraries.append(Library(pypi=PythonPyPiLibrary(package=package))) + else: + print(f"Package {package} is already installed, skipping...") + + if not libraries: + print("All packages are already installed.") + return + + # Install libraries + package_names = [lib.pypi.package for lib in libraries] + print(f"Installing {len(libraries)} packages: {', '.join(package_names)}") + w.libraries.install(cluster_id, libraries=libraries) + + # Wait for installation to complete + print("Waiting for installation to complete...") + wait_for_library_installation(w, cluster_id) diff --git a/openai_sdk_agent_app_sample_code/cookbook/tools/__init__.py b/openai_sdk_agent_app_sample_code/cookbook/tools/__init__.py new file mode 100644 index 0000000..6fc89bd --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/tools/__init__.py @@ -0,0 +1,45 @@ +from cookbook.config import SerializableConfig +from mlflow.models.resources import DatabricksResource + + +from typing import Any, List + + +class Tool(SerializableConfig): + """Base class for all tools""" + + def 
__call__(self, **kwargs) -> Any: + """Execute the tool with validated inputs""" + raise NotImplementedError( + "__call__ must be implemented by Tool subclasses. This method should execute " + "the tool's functionality with the provided validated inputs and return the result." + ) + + name: str + description: str + + def get_json_schema(self) -> dict: + """Returns an OpenAPI-compatible JSON schema for the tool.""" + return { + "type": "function", + "function": { + "name": self.name, + "description": self.description, + "parameters": self._get_parameters_schema(), + }, + } + + def _get_parameters_schema(self) -> dict: + """Returns the JSON schema for the tool's parameters.""" + raise NotImplementedError( + "_get_parameters_schema must be implemented by Tool subclasses. This method should " + "return an OpenAPI-compatible JSON schema dict describing the tool's input parameters. " + "The schema should include parameter names, types, descriptions, and any validation rules." + ) + + def get_resource_dependencies(self) -> List[DatabricksResource]: + """Returns a list of Databricks resources (mlflow.models.resources.* objects) that the tool uses. Used to securely provision credentials for these resources when the tool is deployed to Model Serving.""" + raise NotImplementedError( + "get_resource_dependencies must be implemented by Tool subclasses. This method should " + "return a list of mlflow.models.resources.* objects that the tool depends on." + ) diff --git a/openai_sdk_agent_app_sample_code/cookbook/tools/local_function.py b/openai_sdk_agent_app_sample_code/cookbook/tools/local_function.py new file mode 100644 index 0000000..afbc719 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/tools/local_function.py @@ -0,0 +1,165 @@ +from cookbook.tools import Tool + +from mlflow.models.resources import DatabricksResource +from pydantic import BaseModel, Field, create_model +from unitycatalog.ai.core.utils.docstring_utils import parse_docstring +from typing import Optional + +import inspect +from typing import Any, Callable, List, Type, get_type_hints +import importlib +import mlflow + + +class LocalFunctionTool(Tool): + """Tool implementation that wraps a function""" + + # func: Callable + func_path: str + name: str + description: str + _input_schema: Type[BaseModel] + + def _process_function( + self, func: Callable, name: Optional[str], description: Optional[str] + ) -> tuple[str, str, Type[BaseModel]]: + """Process a function to extract name, description and input schema. + + Args: + func: The function to process + name: Optional override for the function name + description: Optional override for the function description + + Returns: + Tuple of (processed_name, processed_description, processed_input_schema) + """ + processed_name = name or func.__name__ + + # Validate function has type annotations + if not all(get_type_hints(func).values()): + raise ValueError( + f"Tool '{processed_name}' must have complete type annotations for all parameters " + "and return value." + ) + + # Parse the docstring and get description + docstring = inspect.getdoc(func) + if not docstring: + raise ValueError( + f"Tool '{processed_name}' must have a docstring with Google-style formatting." + ) + + doc_info = parse_docstring(docstring) + processed_description = description or doc_info.description + + # Ensure we have parameter documentation + if not doc_info.params: + raise ValueError( + f"Tool '{processed_name}' must have documented parameters in Google-style format. 
" + "Example:\n Args:\n param_name: description" + ) + + # Validate all parameters are documented + sig_params = set(inspect.signature(func).parameters.keys()) + doc_params = set(doc_info.params.keys()) + if sig_params != doc_params: + missing = sig_params - doc_params + extra = doc_params - sig_params + raise ValueError( + f"Tool '{processed_name}' parameter documentation mismatch. " + f"Missing docs for: {missing if missing else 'none'}. " + f"Extra docs for: {extra if extra else 'none'}." + ) + + # Create the input schema + processed_input_schema = self._create_schema_from_function( + func, doc_info.params + ) + + return processed_name, processed_description, processed_input_schema + + def __init__( + self, + name: Optional[str] = None, + description: Optional[str] = None, + *, + func: Optional[Callable] = None, + func_path: Optional[str] = None, + ): + if func is not None and func_path is not None: + raise ValueError("Only one of func or func_path can be provided") + + if func is not None: + # Process the function to get name, description and input schema + processed_name, processed_description, processed_input_schema = ( + self._process_function(func, name, description) + ) + + # Serialize the function's location + func_path = f"{func.__module__}.{func.__name__}" + + # Now call parent class constructor with processed values + super().__init__( + func_path=func_path, + name=processed_name, + description=processed_description, + ) + + self._input_schema = processed_input_schema + + self._loaded_callable = None + self.load_func() + elif func_path is not None: + + super().__init__( + func_path=func_path, + name=name, + description=description, + # _input_schema=None, + ) + + self._loaded_callable = None + self.load_func() + + _, _, processed_input_schema = self._process_function( + self._loaded_callable, name, description + ) + + self._input_schema = processed_input_schema + + @staticmethod + def _create_schema_from_function( + func: Callable, param_descriptions: dict[str, str] + ) -> Type[BaseModel]: + """Creates a Pydantic model from function signature and parsed docstring""" + sig = inspect.signature(func) + type_hints = get_type_hints(func) + + fields = {} + for name, param in sig.parameters.items(): + fields[name] = ( + type_hints.get(name, Any), + Field(description=param_descriptions.get(name, f"Parameter: {name}")), + ) + + return create_model(f"{func.__name__.title()}Inputs", **fields) + + def load_func(self): + if self._loaded_callable is None: + module_name, func_name = self.func_path.rsplit(".", 1) + module = importlib.import_module(module_name) + self._loaded_callable = getattr(module, func_name) + + @mlflow.trace(span_type="TOOL", name="local_function") + def __call__(self, **kwargs) -> Any: + """Execute the tool's function with validated inputs""" + self.load_func() + validated_inputs = self._input_schema(**kwargs) + return self._loaded_callable(**validated_inputs.model_dump()) + + def _get_parameters_schema(self) -> dict: + """Returns the JSON schema for the tool's parameters.""" + return self._input_schema.model_json_schema() + + def get_resource_dependencies(self) -> List[DatabricksResource]: + return [] diff --git a/openai_sdk_agent_app_sample_code/cookbook/tools/uc_tool.py b/openai_sdk_agent_app_sample_code/cookbook/tools/uc_tool.py new file mode 100644 index 0000000..aaba7d3 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/tools/uc_tool.py @@ -0,0 +1,172 @@ +from cookbook.tools import Tool +from cookbook.databricks_utils import get_function_url + + +from 
cookbook.tools.uc_tool_utils import ( + _parse_SparkException_from_tool_execution, + _parse_ParseException_from_tool_execution, +) +import mlflow +from databricks.sdk import WorkspaceClient +from databricks.sdk.errors import ResourceDoesNotExist +from mlflow.models.resources import DatabricksFunction, DatabricksResource +from pydantic import Field, model_validator +from pyspark.errors import SparkRuntimeException +from pyspark.errors.exceptions.connect import ParseException +from unitycatalog.ai.core.databricks import DatabricksFunctionClient +from unitycatalog.ai.openai.toolkit import UCFunctionToolkit +from dataclasses import asdict + +import json +from typing import Any, Dict, List, Union + +ERROR_INSTRUCTIONS_KEY = "error_instructions" +ERROR_STATUS_KEY = "error" + + +class UCTool(Tool): + """Configuration for a Unity Catalog function tool. + + This class defines the configuration for a Unity Catalog function that can be used + as a tool in an agent system. + + Args: + uc_function_name: Unity Catalog location of the function in format: catalog.schema.function_name. + Example: my_catalog.my_schema.my_function + + Returns: + UCTool: A configured Unity Catalog function tool object. + """ + + uc_function_name: str + """Unity Catalog location of the function in format: catalog.schema.function_name.""" + + error_prompt: str = ( + f"""The tool call generated an Exception, detailed in `{ERROR_STATUS_KEY}`. Think step-by-step following these instructions to determine your next step.\n""" + "[1] Is the error due to a problem with the input parameters?\n" + "[2] Could it succeed if retried with exactly the same inputs?\n" + "[3] Could it succeed if retried with modified parameters using the input we already have from the user?\n" + "[4] Could it succeed if retried with modified parameters informed by collecting additional input from the user? What specific input would we need from the user?\n" + """Based on your thinking, if the error is due to a problem with the input parameters, either call this tool again in a way that avoids this exception or collect additional information from the user to modify the inputs to avoid this exception.""" + ) + + # Optional b/c we set these automatically in model_post_init from the UC function itself. + # Suggest not overriding these, but rather updating the UC function's metadata directly. + name: str = Field(default=None) # Make it optional in the constructor + description: str = Field(default=None) # Make it optional in the constructor + + @model_validator(mode="after") + def validate_uc_function_name(self) -> "UCTool": + """Validates that the UC function exists and is accessible. + + Checks that the function name is properly formatted and exists in Unity Catalog + with proper permissions. + + Returns: + UCTool: The validated tool instance. + + Raises: + ValueError: If function name is invalid or function is not accessible. + """ + parts = self.uc_function_name.split(".") + if len(parts) != 3: + raise ValueError( + f"uc_function_name must be in format: catalog.schema.function_name; got `{self.uc_function_name}`" + ) + + # Validate that the function exists in Unity Catalog & user has EXECUTE permission on the function + # Docs: https://databricks-sdk-py.readthedocs.io/en/stable/workspace/catalog/functions.html#get + w = WorkspaceClient() + try: + w.functions.get(name=self.uc_function_name) + except ResourceDoesNotExist: + raise ValueError( + f"Function `{self.uc_function_name}` not found in Unity Catalog or you do not have permission to access it. 
Ensure the function exists, and you have EXECUTE permission on the function, USE CATALOG and USE SCHEMA permissions on the catalog and schema. If function exists, you can verify permissions here: {get_function_url(self.uc_function_name)}." + ) + + return self + + def model_post_init(self, __context: Any) -> None: + + # Initialize the UC clients + self._uc_client = DatabricksFunctionClient() + self._toolkit = UCFunctionToolkit( + function_names=[self.uc_function_name], client=self._uc_client + ) + + # OK to use [0] position b/c we know that there is only one function initialized in the toolkit. + self.name = self._toolkit.tools[0]["function"]["name"] + self.description = self._toolkit.tools[0]["function"]["description"] + + def _get_parameters_schema(self) -> dict: + """Gets the parameter schema for the UC function. + + Returns: + dict: JSON schema describing the function's parameters. + """ + # OK to use [0] position b/c we know that there is only one function initialized in the toolkit. + return self._toolkit.tools[0]["function"]["parameters"] + + @mlflow.trace(span_type="TOOL", name="uc_tool") + def __call__(self, **kwargs) -> Dict[str, str]: + # annotate the span with the tool name + span = mlflow.get_current_active_span() + if span: # TODO: Hack, when mlflow tracing is disabled, span == None. + span.set_attributes({"uc_tool_name": self.uc_function_name}) + + # trace the function call + traced_exec_function = mlflow.trace( + span_type="FUNCTION", name="_uc_client.execute_function" + )(self._uc_client.execute_function) + + # convert input args to json + args_json = json.loads(json.dumps(kwargs, default=str)) + + # TODO: Add in Ben's code parser + + # Try to execute the function & return its value as a dict + try: + result = traced_exec_function( + function_name=self.uc_function_name, parameters=args_json + ) + return asdict(result) + + # Parse the error into a format that's easier for the LLM to understand w/ out any of the Spark runtime error noise + except SparkRuntimeException as tool_exception: + return { + ERROR_STATUS_KEY: _parse_SparkException_from_tool_execution( + tool_exception + ), + ERROR_INSTRUCTIONS_KEY: self.error_prompt, + } + except ParseException as tool_exception: + return { + ERROR_STATUS_KEY: _parse_ParseException_from_tool_execution( + tool_exception + ), + ERROR_INSTRUCTIONS_KEY: self.error_prompt, + } + except Exception as tool_exception: + # some other type of error that is unknown, parse into the same format as the Spark exceptions + # will first try to parse using the SparkException parsing code, if that fails, will then try the generic one + return { + ERROR_STATUS_KEY: _parse_SparkException_from_tool_execution( + tool_exception + ), + ERROR_INSTRUCTIONS_KEY: self.error_prompt, + } + + def model_dump(self, **kwargs) -> Dict[str, Any]: + """Override model_dump to exclude name and description fields. + + Returns: + Dict[str, Any]: Dictionary representation of the model excluding name and description. 
+ """ + kwargs["exclude"] = {"name", "description"}.union(kwargs.get("exclude", set())) + return super().model_dump(**kwargs) + + def get_resource_dependencies(self) -> List[DatabricksResource]: + return [DatabricksFunction(function_name=self.uc_function_name)] + + def _remove_udfbody_from_stack_trace(self, stack_trace: str) -> str: + return stack_trace.replace('File "",', "").strip() diff --git a/openai_sdk_agent_app_sample_code/cookbook/tools/uc_tool_utils.py b/openai_sdk_agent_app_sample_code/cookbook/tools/uc_tool_utils.py new file mode 100644 index 0000000..c4f7825 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/tools/uc_tool_utils.py @@ -0,0 +1,132 @@ +import mlflow +from pyspark.errors import SparkRuntimeException +from pyspark.errors.exceptions.connect import ParseException +import re + +import logging +from typing import Dict, Union + +ERROR_KEY = "error_message" +STACK_TRACE_KEY = "stack_trace" + + +@mlflow.trace(span_type="PARSER") +def _remove_udfbody_from_pyspark_stack_trace(stack_trace: str) -> str: + return stack_trace.replace('File "",', "").strip() + + +@mlflow.trace(span_type="PARSER") +def _parse_PySpark_exception_dumped_as_string(error_msg: str) -> Dict[str, str]: + # Extract error section between == Error == and == Stacktrace == + error = error_msg.split("== Error ==")[1].split("== Stacktrace ==")[0].strip() + + # Extract stacktrace section after == Stacktrace == and before SQL + stack_trace = error_msg.split("== Stacktrace ==")[1].split("== SQL")[0].strip() + + # Remove SQLSTATE and anything after it from the stack trace + if "SQLSTATE" in stack_trace: + stack_trace = stack_trace.split("SQLSTATE")[0].strip() + + return { + STACK_TRACE_KEY: _remove_udfbody_from_pyspark_stack_trace(stack_trace), + ERROR_KEY: error, + } + + +@mlflow.trace(span_type="PARSER") +def _parse_PySpark_exception_from_known_structure( + tool_exception: SparkRuntimeException, +) -> Dict[str, str]: + raw_stack_trace = tool_exception.getMessageParameters()["stack"] + return { + STACK_TRACE_KEY: _remove_udfbody_from_pyspark_stack_trace(raw_stack_trace), + ERROR_KEY: tool_exception.getMessageParameters()["error"], + } + + +@mlflow.trace(span_type="PARSER") +def _parse_generic_tool_exception(tool_exception: Exception) -> Dict[str, str]: + return { + STACK_TRACE_KEY: None, + ERROR_KEY: str(tool_exception), + } + + +@mlflow.trace(span_type="PARSER") +def _parse_SparkException_from_tool_execution( + tool_exception: Union[SparkRuntimeException, Exception], +) -> Dict[str, str]: + error_info_to_return: Union[Dict, str] = None + + # First attempt: first try to parse from the known structure + try: + logging.info( + f"Trying to parse spark exception {tool_exception} using its provided structured data." + ) + # remove the from the stack trace which the LLM knows nothing about + # raw_stack_trace = tool_exception.getMessageParameters()["stack"] + return _parse_PySpark_exception_from_known_structure(tool_exception) + + except Exception as e: + # 2nd attempt: that failed, let's try to parse the SparkException's raw formatting + logging.info( + f"Error parsing spark exception using its provided structured data: {e}, will now try to parse its string output..." + ) + + logging.info( + f"Trying to parse spark exception {tool_exception} using its raw string output." 
+ ) + try: + raw_error_msg = str(tool_exception) + return _parse_PySpark_exception_dumped_as_string(raw_error_msg) + except Exception as e: + # Last attempt: if that fails, just use the raw error + logging.info( + f"Error parsing spark exception using its raw string formatting: {e}, will just return the raw error message." + ) + + logging.info(f"returning the raw error message: {str(tool_exception)}.") + return _parse_generic_tool_exception(tool_exception) + + +# TODO: this might be over fit to python code execution tool, need to test it more +@mlflow.trace(span_type="PARSER") +def _parse_ParseException_from_tool_execution( + tool_exception: ParseException, +) -> Dict[str, str]: + try: + error_msg = tool_exception.getMessage() + # Extract the main error message (remove SQLSTATE and position info) + error = error_msg.split("SQLSTATE:")[0].strip() + if "[PARSE_SYNTAX_ERROR]" in error: + error = error.split("[PARSE_SYNTAX_ERROR]")[1].strip() + + # Pattern to match "line X, pos Y" + pattern = r"line (\d+), pos (\d+)" + match = re.search(pattern, error_msg) + + if match: + line_num = match.group(1) + pos_num = match.group(2) + line_info = f"(line {line_num}, pos {pos_num})" + error = error + " " + line_info + + # Extract the SQL section with the error pointer + sql_section = ( + error_msg.split("== SQL ==")[1].split("JVM stacktrace:")[0].strip() + if "== SQL ==" in error_msg + else "" + ) + + # Remove the SELECT statement from the error message + select_pattern = r"SELECT\s+`[^`]+`\.`[^`]+`\.`[^`]+`\('" + # error_without_sql_parts = sql_section.replace(select_pattern, "").strip() + error_without_sql_parts = re.sub(select_pattern, "", sql_section).strip() + + return {STACK_TRACE_KEY: error_without_sql_parts, ERROR_KEY: error} + except Exception as e: + logging.info(f"Error parsing ParseException: {e}") + return { + STACK_TRACE_KEY: None, + ERROR_KEY: str(tool_exception), + } diff --git a/openai_sdk_agent_app_sample_code/cookbook/tools/vector_search.py b/openai_sdk_agent_app_sample_code/cookbook/tools/vector_search.py new file mode 100644 index 0000000..d2c82c6 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/cookbook/tools/vector_search.py @@ -0,0 +1,455 @@ +import mlflow +from mlflow.entities import Document +from mlflow.models.resources import ( + DatabricksVectorSearchIndex, + DatabricksServingEndpoint, + DatabricksResource, +) + +import json +from typing import Literal, Any, Dict, List, Union +from pydantic import BaseModel, model_validator +from databricks.sdk import WorkspaceClient +from databricks.sdk.service.vectorsearch import VectorIndexType +from databricks.sdk.errors import ResourceDoesNotExist +from cookbook.tools import Tool +from dataclasses import asdict + +FilterDict = Dict[str, Union[str, int, float, List[Union[str, int, float]]]] + +# Change this to True to use the source table's metadata for the filterable columns. +# This causes deployment to fail since the deployed model doesn't have access to the source table. +USE_SOURCE_TABLE_FOR_FILTERS_METADATA = False + + +class VectorSearchSchema(BaseModel): + """Configuration for the schema used in the retriever's response. + + This class defines the schema configuration for how the vector search retriever + structures and returns results. + + Args: + primary_key: The column name in the retriever's response referred to the unique key. + If using Databricks vector search with delta sync, this should be the column + of the delta table that acts as the primary key. 
+ chunk_text: The column name in the retriever's response that contains the + returned chunk. + document_uri: The template of the chunk returned by the retriever - used to format + the chunk for presentation to the LLM & to display chunk's from the same + document_uri together in Agent Evaluation Review App. + additional_metadata_columns: Additional metadata columns to present to the LLM. + filterable_columns: List of columns that can be used as filters by the LLM. + + Returns: + VectorSearchSchema: A configured schema object for the vector search retriever. + """ + + _primary_key: str | None = None + """The column name in the retriever's response referred to the unique key. + If using Databricks vector search with delta sync, this should be the column + of the delta table that acts as the primary key, and will be set by reading the index's metadata.""" + + chunk_text: str + """The column name in the retriever's response that contains the returned chunk.""" + + document_uri: str + """The template of the chunk returned by the retriever - used to format + the chunk for presentation to the LLM & to display chunk's from the same + document_uri together in Agent Evaluation Review App.""" + + additional_metadata_columns: List[str] = [] + """Additional metadata columns to present to the LLM.""" + + @property + def all_columns(self) -> List[str]: + cols = [ + self.primary_key, + self.chunk_text, + self.document_uri, + ] + self.additional_metadata_columns + # de-duplicate + return list(set(cols)) + + @property + def primary_key(self) -> str: + """The primary key field, which must be set by VectorSearchRetrieverConfig""" + if self._primary_key is None: + raise ValueError("primary_key must be set by VectorSearchRetrieverConfig") + return self._primary_key + + +class VectorSearchParameters(BaseModel): + """Configuration for the input schema (parameters) used in the retriever. + + This class defines the configuration parameters for how the vector search retriever + performs searches and returns results. + + Args: + num_results: The number of chunks to return for each query. For example, + setting this to 5 will return the top 5 most relevant search results. + query_type: The type of search to use - either 'ann' for semantic similarity + using embeddings only, or 'hybrid' which combines keyword and semantic + similarity search. + + Returns: + VectorSearchParameters: A configured parameters object for the vector search retriever. + """ + + num_results: int = 5 + """The number of chunks to return for each query.""" + + query_type: Literal["ann", "hybrid"] = "ann" + """The type of search to use - either 'ann' for semantic similarity using embeddings only, + or 'hybrid' which combines keyword and semantic similarity search.""" + + +class VectorSearchRetrieverTool(Tool): + """Configuration for a Databricks Vector Search retriever. + + This class defines the configuration for a Vector Search retriever that can be used + either deterministically in a fixed RAG chain or as a tool. + + Args: + vector_search_index: Unity Catalog location of the Vector Search index. + Example: catalog.schema.vector_index. + vector_search_schema: Schema configuration for the retriever. + doc_similarity_threshold: Threshold (0-1) for the retrieved document's similarity score. Used + to exclude dissimilar results. Increase if retriever returns irrelevant content. + vector_search_parameters: Parameters passed to index.similarity_search(...). 
+ See https://docs.databricks.com/en/generative-ai/create-query-vector-search.html#query-a-vector-search-endpoint for details. + retriever_query_parameter_prompt: Description of the query parameter for the retriever. + + Returns: + VectorSearchRetrieverConfig: A configured retriever config object. + """ + + vector_search_index: str + """Unity Catalog location of the Vector Search index. + Example: catalog.schema.vector_index.""" + + filterable_columns: List[str] = [] + """List of columns that can be used as filters by the LLM. Columns will be validated against the source table & metadata about each column loaded from the Unity Catalog to improve the LLM's ability to filter.""" + + vector_search_schema: VectorSearchSchema + """Schema configuration for the retriever.""" + + doc_similarity_threshold: float = 0.0 + """Threshold (0-1) for the retrieved document's similarity score. + Used to exclude dissimilar results. Increase if retriever returns irrelevant content.""" + + vector_search_parameters: VectorSearchParameters = VectorSearchParameters() + """Parameters passed to index.similarity_search(...). + See https://docs.databricks.com/en/generative-ai/create-query-vector-search.html#query-a-vector-search-endpoint for details.""" + + retriever_query_parameter_prompt: str = "query to look up in retriever" + retriever_filter_parameter_prompt: str = ( + "optional filters to apply to the search. An array of objects, each specifying a field name and the filters to apply to that field." + ) + + name: str + description: str + + def __init__(self, **data): + """Initialize the WorkspaceClient and set the MLflow retriever schema.""" + super().__init__(**data) + mlflow.models.set_retriever_schema( + name=self.vector_search_index, + primary_key=self.vector_search_schema.primary_key, + text_column=self.vector_search_schema.chunk_text, + doc_uri=self.vector_search_schema.document_uri, + ) + + def _validate_columns_exist( + self, columns: List[str], source_table: str, table_columns: set, context: str + ) -> None: + """Helper method to validate that columns exist in the source table. + + Args: + columns: List of columns to validate + source_table: Name of the source table + table_columns: Set of available columns in the table + context: Context string for error message (e.g. "filterable columns", "chunk_text") + """ + for col in columns: + if col not in table_columns: + raise ValueError( + f"Column '{col}' specified in {context} not found in source table {source_table}. " + f"Available columns: {', '.join(sorted(table_columns))}" + ) + + def _get_index_info(self): + w = WorkspaceClient() + return w.vector_search_indexes.get_index(self.vector_search_index) + + def _check_if_index_exists(self): + w = WorkspaceClient() + try: + index_info = w.vector_search_indexes.get_index(self.vector_search_index) + return index_info is not None + except ResourceDoesNotExist as e: + return False + + @property + def filterable_columns_descriptions_for_llm(self) -> str: + """Returns a formatted description of all filterable columns for use in prompts.""" + if USE_SOURCE_TABLE_FOR_FILTERS_METADATA: + # Present the LLM with the source table's metadata for the filterable columns. + # TODO: be able to get this data directly from the index's metadata + # Get source table info + index_info = self._get_index_info() + if index_info.index_type != VectorIndexType.DELTA_SYNC: + raise ValueError( + f"Unsupported index type: {index_info.index_type}. Only DELTA_SYNC is supported." 
+ ) + + w = WorkspaceClient() + source_table = index_info.delta_sync_index_spec.source_table + table_info = w.tables.get(source_table) + + # Create mapping of column name to description and type + column_info = { + col.name: (col.type_text, col.comment if col.comment else None) + for col in table_info.columns + } + # print(column_info) + + # Build descriptions list + descriptions = [] + for col in self.filterable_columns: + type_text, desc = column_info.get(col, (None, None)) + formatted_desc = f"(`{col}`, {type_text}" + ( + f", '{desc}'" + ")" if desc else "" + ) + descriptions.append(formatted_desc) + return ", ".join(descriptions) + + else: + # just use the column names as metadata + return ", ".join(str(col) for col in self.filterable_columns) + + @model_validator(mode="after") + def validate_index_and_columns(self): + """Validates the index exists and all columns after the model is fully initialized""" + + # Check that index exists + if not self._check_if_index_exists(): + raise ValueError( + f"Vector search index {self.vector_search_index} does not exist." + ) + + index_info = self._get_index_info() + + # Set primary key from index if not already set + if not self.vector_search_schema._primary_key: + if index_info.primary_key: + self.vector_search_schema._primary_key = index_info.primary_key + else: + raise ValueError( + f"Could not find primary key in index {self.vector_search_index}" + ) + + # TODO: Validate all configured schema columns exist in the index. Currently, this data is not available in the index metadata. + + return self + + @model_validator(mode="after") + def validate_threshold(self): + if not 0 <= self.doc_similarity_threshold <= 1: + raise ValueError("doc_similarity_threshold must be between 0 and 1") + return self + + def _get_parameters_schema(self) -> dict: + schema = { + "type": "object", + "required": ["query"], + "additionalProperties": False, + "properties": { + "query": { + # "default": None, + "description": self.retriever_query_parameter_prompt, + "type": "string", + }, + }, + } + + if self.filterable_columns: + schema["properties"]["filters"] = { + # "default": None, + "description": self.retriever_filter_parameter_prompt, + "type": "array", + "items": { + "type": "object", + "properties": { + "field": { + "type": "string", + "enum": self.filterable_columns, + "description": "The fields to apply the filter to. 
Can use any of the following as filters, where each is (`field_name`, field_type, 'field_description'): " + + self.filterable_columns_descriptions_for_llm + + "For string fields, only use LIKE filter; for numeric fields, either provide a number to achieve == or use <, <=, >, >= filters; for array fields, either provide an array of 1+ values to achieve IN or use NOT to exclude.", + }, + "filter": { + "anyOf": [ + {"type": "string"}, + {"type": "number"}, + { + "type": "array", + "items": { + "anyOf": [ + {"type": "string"}, + {"type": "number"}, + ] + }, + }, + { + "type": "object", + "properties": { + "<": {"type": "number"}, + "<=": {"type": "number"}, + ">": {"type": "number"}, + ">=": {"type": "number"}, + "LIKE": {"type": "string"}, + "NOT": { + "anyOf": [ + {"type": "string"}, + {"type": "number"}, + ] + }, + }, + "additionalProperties": False, + "minProperties": 1, + "maxProperties": 1, + }, + ] + }, + }, + "required": ["field", "filter"], + "additionalProperties": False, + }, + } + + return schema + + @mlflow.trace(span_type="RETRIEVER", name="vector_search_retriever") + def __call__(self, query: str, filters: Dict[Any, Any] = None) -> List[Document]: + """ + Performs vector search to retrieve relevant chunks. + + Args: + query: Search query. + filters: Optional filters to apply to the search. Should follow the LLM-generated filter pattern of a list of field/filter pairs that will be converted to Databricks Vector Search filter format. + + Returns: + List of retrieved Documents. + """ + span = mlflow.get_current_active_span() + if span: # TODO: Hack, when mlflow tracing is disabled, span == None. + span.set_attributes({"vector_search_index": self.vector_search_index}) + + w = WorkspaceClient() + + traced_search = mlflow.trace( + w.vector_search_indexes.query_index, + name="_workspace_client.vector_search_indexes.query_index", + span_type="FUNCTION", + ) + + # Parse filters written by the LLM into Vector Search compatible format + vs_filters = json.dumps(self.parse_filters(filters)) if filters else None + + results = traced_search( + index_name=self.vector_search_index, + query_text=query, + filters_json=vs_filters, + columns=self.vector_search_schema.all_columns, + **self.vector_search_parameters.model_dump(exclude_none=True), + ) + + # We turn the config into a dict and pass it here + return self.convert_vector_search_to_documents( + results.as_dict(), self.doc_similarity_threshold + ) + + @mlflow.trace(span_type="PARSER") + def convert_vector_search_to_documents( + self, vs_results, vector_search_threshold + ) -> List[Document]: + column_names = [] + for column in vs_results["manifest"]["columns"]: + column_names.append(column) + + docs = [] + if vs_results["result"]["row_count"] > 0: + for item in vs_results["result"]["data_array"]: + metadata = {} + score = item[-1] + if score >= vector_search_threshold: + metadata["similarity_score"] = score + for i, field in enumerate(item[0:-1]): + metadata[column_names[i]["name"]] = field + # put contents of the chunk into page_content + page_content = metadata[self.vector_search_schema.chunk_text] + del metadata[self.vector_search_schema.chunk_text] + + # put the primary key into id + id = metadata[self.vector_search_schema.primary_key] + del metadata[self.vector_search_schema.primary_key] + + doc = Document(page_content=page_content, metadata=metadata, id=id) + docs.append(asdict(doc)) + + return docs + + @mlflow.trace(span_type="PARSER") + def parse_filters(self, filters: List[Dict[str, Any]]) -> Dict[str, Any]: + """ + Parse input 
filters into Vector Search compatible format. + + Args: + filters: List of input filters in the new format. + + Returns: + Filters in Vector Search compatible format. + """ + vs_filters = {} + for filter_item in filters: + suggested_field = filter_item["field"] + suggested_filter = filter_item["filter"] + + if isinstance(suggested_filter, list): + # vs_filters[key] = {"OR": value} + vs_filters[suggested_field] = suggested_filter + elif isinstance(suggested_filter, dict): + operator, operand = next(iter(suggested_filter.items())) + vs_filters[suggested_field + " " + operator] = operand + # if operator in ["<", "<=", ">", ">="]: + # vs_filters[f"{key} {operator}"] = operand + # elif operator.upper() == "LIKE": + # vs_filters[f"{key} LIKE"] = operand + # elif operator.upper() == "NOT": + # vs_filters[f"{key} !="] = operand + else: + vs_filters[suggested_field] = suggested_filter + return vs_filters + + def get_resource_dependencies(self) -> List[DatabricksResource]: + dependencies = [ + DatabricksVectorSearchIndex(index_name=self.vector_search_index) + ] + + # Get the embedding model endpoint + index_info = self._get_index_info() + if index_info.index_type == VectorIndexType.DELTA_SYNC: + # Only DELTA_SYNC indexes have embedding model endpoints + for ( + embedding_source_col + ) in index_info.delta_sync_index_spec.embedding_source_columns: + endpoint_name = embedding_source_col.embedding_model_endpoint_name + if endpoint_name is not None: + dependencies.append( + DatabricksServingEndpoint(endpoint_name=endpoint_name), + ) + else: + print( + f"Could not identify the embedding model endpoint resource for {self.vector_search_index}. Please manually add the embedding model endpoint to `databricks_resources`." + ) + return dependencies diff --git a/openai_sdk_agent_app_sample_code/environment.yaml b/openai_sdk_agent_app_sample_code/environment.yaml new file mode 100644 index 0000000..76883b2 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/environment.yaml @@ -0,0 +1,4 @@ +client: "1" +dependencies: + - --index-url https://pypi.org/simple + - -r requirements.txt \ No newline at end of file diff --git a/openai_sdk_agent_app_sample_code/poetry.lock b/openai_sdk_agent_app_sample_code/poetry.lock new file mode 100644 index 0000000..6b4240b --- /dev/null +++ b/openai_sdk_agent_app_sample_code/poetry.lock @@ -0,0 +1,4969 @@ +# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. + +[[package]] +name = "alembic" +version = "1.14.0" +description = "A database migration tool for SQLAlchemy." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "alembic-1.14.0-py3-none-any.whl", hash = "sha256:99bd884ca390466db5e27ffccff1d179ec5c05c965cfefc0607e69f9e411cb25"}, + {file = "alembic-1.14.0.tar.gz", hash = "sha256:b00892b53b3642d0b8dbedba234dbf1924b69be83a9a769d5a624b01094e304b"}, +] + +[package.dependencies] +Mako = "*" +SQLAlchemy = ">=1.3.0" +typing-extensions = ">=4" + +[package.extras] +tz = ["backports.zoneinfo"] + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "anyio" +version = "4.6.2.post1" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.9" +files = [ + {file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"}, + {file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"}, +] + +[package.dependencies] +idna = ">=2.8" +sniffio = ">=1.1" + +[package.extras] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] + +[[package]] +name = "appnope" +version = "0.1.4" +description = "Disable App Nap on macOS >= 10.9" +optional = false +python-versions = ">=3.6" +files = [ + {file = "appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c"}, + {file = "appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee"}, +] + +[[package]] +name = "asttokens" +version = "2.4.1" +description = "Annotate AST trees with source code positions" +optional = false +python-versions = "*" +files = [ + {file = "asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24"}, + {file = "asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0"}, +] + +[package.dependencies] +six = ">=1.12.0" + +[package.extras] +astroid = ["astroid (>=1,<2)", "astroid (>=2,<4)"] +test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"] + +[[package]] +name = "azure-core" +version = "1.32.0" +description = "Microsoft Azure Core Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure_core-1.32.0-py3-none-any.whl", hash = "sha256:eac191a0efb23bfa83fddf321b27b122b4ec847befa3091fa736a5c32c50d7b4"}, + {file = "azure_core-1.32.0.tar.gz", hash = "sha256:22b3c35d6b2dae14990f6c1be2912bf23ffe50b220e708a28ab1bb92b1c730e5"}, +] + +[package.dependencies] +requests = ">=2.21.0" +six = ">=1.11.0" +typing-extensions = ">=4.6.0" + +[package.extras] +aio = ["aiohttp (>=3.0)"] + +[[package]] +name = "azure-storage-blob" +version = "12.24.0" +description = "Microsoft Azure Blob Storage Client Library for Python" +optional = false +python-versions = ">=3.8" 
+files = [ + {file = "azure_storage_blob-12.24.0-py3-none-any.whl", hash = "sha256:4f0bb4592ea79a2d986063696514c781c9e62be240f09f6397986e01755bc071"}, + {file = "azure_storage_blob-12.24.0.tar.gz", hash = "sha256:eaaaa1507c8c363d6e1d1342bd549938fdf1adec9b1ada8658c8f5bf3aea844e"}, +] + +[package.dependencies] +azure-core = ">=1.30.0" +cryptography = ">=2.1.4" +isodate = ">=0.6.1" +typing-extensions = ">=4.6.0" + +[package.extras] +aio = ["azure-core[aio] (>=1.30.0)"] + +[[package]] +name = "azure-storage-file-datalake" +version = "12.18.0" +description = "Microsoft Azure File DataLake Storage Client Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure_storage_file_datalake-12.18.0-py3-none-any.whl", hash = "sha256:e7fe222af632a60c98ba50aa965c217451476ba74ecb87d246a5d746be3916e2"}, + {file = "azure_storage_file_datalake-12.18.0.tar.gz", hash = "sha256:2b2b88c1c11b2158ee1fad373df2f576acfcd27b83ad1fa3b4989bbdd63dd72a"}, +] + +[package.dependencies] +azure-core = ">=1.30.0" +azure-storage-blob = ">=12.24.0" +isodate = ">=0.6.1" +typing-extensions = ">=4.6.0" + +[package.extras] +aio = ["azure-core[aio] (>=1.30.0)"] + +[[package]] +name = "beautifulsoup4" +version = "4.12.3" +description = "Screen-scraping library" +optional = false +python-versions = ">=3.6.0" +files = [ + {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"}, + {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"}, +] + +[package.dependencies] +soupsieve = ">1.2" + +[package.extras] +cchardet = ["cchardet"] +chardet = ["chardet"] +charset-normalizer = ["charset-normalizer"] +html5lib = ["html5lib"] +lxml = ["lxml"] + +[[package]] +name = "blinker" +version = "1.9.0" +description = "Fast, simple object-to-object and broadcast signaling" +optional = false +python-versions = ">=3.9" +files = [ + {file = "blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc"}, + {file = "blinker-1.9.0.tar.gz", hash = "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf"}, +] + +[[package]] +name = "boto3" +version = "1.35.64" +description = "The AWS SDK for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "boto3-1.35.64-py3-none-any.whl", hash = "sha256:cdacf03fc750caa3aa0dbf6158166def9922c9d67b4160999ff8fc350662facc"}, + {file = "boto3-1.35.64.tar.gz", hash = "sha256:bc3fc12b41fa2c91e51ab140f74fb1544408a2b1e00f88a4c2369a66d18ddf20"}, +] + +[package.dependencies] +botocore = ">=1.35.64,<1.36.0" +jmespath = ">=0.7.1,<2.0.0" +s3transfer = ">=0.10.0,<0.11.0" + +[package.extras] +crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] + +[[package]] +name = "botocore" +version = "1.35.64" +description = "Low-level, data-driven core of boto 3." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "botocore-1.35.64-py3-none-any.whl", hash = "sha256:bbd96bf7f442b1d5e35b36f501076e4a588c83d8d84a1952e9ee1d767e5efb3e"}, + {file = "botocore-1.35.64.tar.gz", hash = "sha256:2f95c83f31c9e38a66995c88810fc638c829790e125032ba00ab081a2cf48cb9"}, +] + +[package.dependencies] +jmespath = ">=0.7.1,<2.0.0" +python-dateutil = ">=2.1,<3.0.0" +urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""} + +[package.extras] +crt = ["awscrt (==0.22.0)"] + +[[package]] +name = "cachetools" +version = "5.5.0" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, + {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, +] + +[[package]] +name = "certifi" +version = "2024.8.30" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, + {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, +] + +[[package]] +name = "cffi" +version = "1.17.1" +description = "Foreign Function Interface for Python calling C code." +optional = false +python-versions = ">=3.8" +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = 
"cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.4.0" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, + {file = 
"charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, + 
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, + {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, + {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, +] + +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "cloudpickle" +version = "3.1.0" +description = "Pickler class to extend the standard pickle.Pickler functionality" +optional = false +python-versions = ">=3.8" +files = [ + {file = "cloudpickle-3.1.0-py3-none-any.whl", hash = "sha256:fe11acda67f61aaaec473e3afe030feb131d78a43461b718185363384f1ba12e"}, + {file = "cloudpickle-3.1.0.tar.gz", hash = "sha256:81a929b6e3c7335c863c771d673d105f02efdb89dfaba0c90495d1c64796601b"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "comm" +version = "0.2.2" +description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "comm-0.2.2-py3-none-any.whl", hash = "sha256:e6fb86cb70ff661ee8c9c14e7d36d6de3b4066f1441be4063df9c5009f0a64d3"}, + {file = "comm-0.2.2.tar.gz", hash = "sha256:3fd7a84065306e07bea1773df6eb8282de51ba82f77c72f9c85716ab11fe980e"}, +] + +[package.dependencies] +traitlets = ">=4" + +[package.extras] +test = ["pytest"] + +[[package]] +name = "contourpy" +version = "1.3.1" +description = "Python library for calculating contours of 2D quadrilateral grids" +optional = false +python-versions = ">=3.10" +files = [ + {file = "contourpy-1.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a045f341a77b77e1c5de31e74e966537bba9f3c4099b35bf4c2e3939dd54cdab"}, + {file = "contourpy-1.3.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:500360b77259914f7805af7462e41f9cb7ca92ad38e9f94d6c8641b089338124"}, + {file = "contourpy-1.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2f926efda994cdf3c8d3fdb40b9962f86edbc4457e739277b961eced3d0b4c1"}, + {file = "contourpy-1.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:adce39d67c0edf383647a3a007de0a45fd1b08dedaa5318404f1a73059c2512b"}, + {file = "contourpy-1.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abbb49fb7dac584e5abc6636b7b2a7227111c4f771005853e7d25176daaf8453"}, + {file = "contourpy-1.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0cffcbede75c059f535725c1680dfb17b6ba8753f0c74b14e6a9c68c29d7ea3"}, + {file = "contourpy-1.3.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ab29962927945d89d9b293eabd0d59aea28d887d4f3be6c22deaefbb938a7277"}, + {file = "contourpy-1.3.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:974d8145f8ca354498005b5b981165b74a195abfae9a8129df3e56771961d595"}, + {file = "contourpy-1.3.1-cp310-cp310-win32.whl", hash = "sha256:ac4578ac281983f63b400f7fe6c101bedc10651650eef012be1ccffcbacf3697"}, + {file = "contourpy-1.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:174e758c66bbc1c8576992cec9599ce8b6672b741b5d336b5c74e35ac382b18e"}, + {file = "contourpy-1.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3e8b974d8db2c5610fb4e76307e265de0edb655ae8169e8b21f41807ccbeec4b"}, + {file = "contourpy-1.3.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:20914c8c973f41456337652a6eeca26d2148aa96dd7ac323b74516988bea89fc"}, + {file = "contourpy-1.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19d40d37c1c3a4961b4619dd9d77b12124a453cc3d02bb31a07d58ef684d3d86"}, + {file = "contourpy-1.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:113231fe3825ebf6f15eaa8bc1f5b0ddc19d42b733345eae0934cb291beb88b6"}, + {file = "contourpy-1.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4dbbc03a40f916a8420e420d63e96a1258d3d1b58cbdfd8d1f07b49fcbd38e85"}, + {file = "contourpy-1.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a04ecd68acbd77fa2d39723ceca4c3197cb2969633836ced1bea14e219d077c"}, + {file = "contourpy-1.3.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c414fc1ed8ee1dbd5da626cf3710c6013d3d27456651d156711fa24f24bd1291"}, + {file = "contourpy-1.3.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:31c1b55c1f34f80557d3830d3dd93ba722ce7e33a0b472cba0ec3b6535684d8f"}, + {file = "contourpy-1.3.1-cp311-cp311-win32.whl", hash = "sha256:f611e628ef06670df83fce17805c344710ca5cde01edfdc72751311da8585375"}, + {file = 
"contourpy-1.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:b2bdca22a27e35f16794cf585832e542123296b4687f9fd96822db6bae17bfc9"}, + {file = "contourpy-1.3.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0ffa84be8e0bd33410b17189f7164c3589c229ce5db85798076a3fa136d0e509"}, + {file = "contourpy-1.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805617228ba7e2cbbfb6c503858e626ab528ac2a32a04a2fe88ffaf6b02c32bc"}, + {file = "contourpy-1.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ade08d343436a94e633db932e7e8407fe7de8083967962b46bdfc1b0ced39454"}, + {file = "contourpy-1.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:47734d7073fb4590b4a40122b35917cd77be5722d80683b249dac1de266aac80"}, + {file = "contourpy-1.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2ba94a401342fc0f8b948e57d977557fbf4d515f03c67682dd5c6191cb2d16ec"}, + {file = "contourpy-1.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efa874e87e4a647fd2e4f514d5e91c7d493697127beb95e77d2f7561f6905bd9"}, + {file = "contourpy-1.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1bf98051f1045b15c87868dbaea84f92408337d4f81d0e449ee41920ea121d3b"}, + {file = "contourpy-1.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:61332c87493b00091423e747ea78200659dc09bdf7fd69edd5e98cef5d3e9a8d"}, + {file = "contourpy-1.3.1-cp312-cp312-win32.whl", hash = "sha256:e914a8cb05ce5c809dd0fe350cfbb4e881bde5e2a38dc04e3afe1b3e58bd158e"}, + {file = "contourpy-1.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:08d9d449a61cf53033612cb368f3a1b26cd7835d9b8cd326647efe43bca7568d"}, + {file = "contourpy-1.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a761d9ccfc5e2ecd1bf05534eda382aa14c3e4f9205ba5b1684ecfe400716ef2"}, + {file = "contourpy-1.3.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:523a8ee12edfa36f6d2a49407f705a6ef4c5098de4f498619787e272de93f2d5"}, + {file = "contourpy-1.3.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece6df05e2c41bd46776fbc712e0996f7c94e0d0543af1656956d150c4ca7c81"}, + {file = "contourpy-1.3.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:573abb30e0e05bf31ed067d2f82500ecfdaec15627a59d63ea2d95714790f5c2"}, + {file = "contourpy-1.3.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fa36448e6a3a1a9a2ba23c02012c43ed88905ec80163f2ffe2421c7192a5d7"}, + {file = "contourpy-1.3.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ea9924d28fc5586bf0b42d15f590b10c224117e74409dd7a0be3b62b74a501c"}, + {file = "contourpy-1.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5b75aa69cb4d6f137b36f7eb2ace9280cfb60c55dc5f61c731fdf6f037f958a3"}, + {file = "contourpy-1.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:041b640d4ec01922083645a94bb3b2e777e6b626788f4095cf21abbe266413c1"}, + {file = "contourpy-1.3.1-cp313-cp313-win32.whl", hash = "sha256:36987a15e8ace5f58d4d5da9dca82d498c2bbb28dff6e5d04fbfcc35a9cb3a82"}, + {file = "contourpy-1.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:a7895f46d47671fa7ceec40f31fae721da51ad34bdca0bee83e38870b1f47ffd"}, + {file = "contourpy-1.3.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:9ddeb796389dadcd884c7eb07bd14ef12408aaae358f0e2ae24114d797eede30"}, + {file = "contourpy-1.3.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:19c1555a6801c2f084c7ddc1c6e11f02eb6a6016ca1318dd5452ba3f613a1751"}, + {file = 
"contourpy-1.3.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:841ad858cff65c2c04bf93875e384ccb82b654574a6d7f30453a04f04af71342"}, + {file = "contourpy-1.3.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4318af1c925fb9a4fb190559ef3eec206845f63e80fb603d47f2d6d67683901c"}, + {file = "contourpy-1.3.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:14c102b0eab282427b662cb590f2e9340a9d91a1c297f48729431f2dcd16e14f"}, + {file = "contourpy-1.3.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05e806338bfeaa006acbdeba0ad681a10be63b26e1b17317bfac3c5d98f36cda"}, + {file = "contourpy-1.3.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4d76d5993a34ef3df5181ba3c92fabb93f1eaa5729504fb03423fcd9f3177242"}, + {file = "contourpy-1.3.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:89785bb2a1980c1bd87f0cb1517a71cde374776a5f150936b82580ae6ead44a1"}, + {file = "contourpy-1.3.1-cp313-cp313t-win32.whl", hash = "sha256:8eb96e79b9f3dcadbad2a3891672f81cdcab7f95b27f28f1c67d75f045b6b4f1"}, + {file = "contourpy-1.3.1-cp313-cp313t-win_amd64.whl", hash = "sha256:287ccc248c9e0d0566934e7d606201abd74761b5703d804ff3df8935f523d546"}, + {file = "contourpy-1.3.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b457d6430833cee8e4b8e9b6f07aa1c161e5e0d52e118dc102c8f9bd7dd060d6"}, + {file = "contourpy-1.3.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb76c1a154b83991a3cbbf0dfeb26ec2833ad56f95540b442c73950af2013750"}, + {file = "contourpy-1.3.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:44a29502ca9c7b5ba389e620d44f2fbe792b1fb5734e8b931ad307071ec58c53"}, + {file = "contourpy-1.3.1.tar.gz", hash = "sha256:dfd97abd83335045a913e3bcc4a09c0ceadbe66580cf573fe961f4a825efa699"}, +] + +[package.dependencies] +numpy = ">=1.23" + +[package.extras] +bokeh = ["bokeh", "selenium"] +docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"] +mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.11.1)", "types-Pillow"] +test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] +test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"] + +[[package]] +name = "cryptography" +version = "43.0.3" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"}, + {file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"}, + {file = "cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"}, + {file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"}, + {file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"}, + {file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"}, + {file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"}, +] + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "cycler" +version = "0.12.1" +description = "Composable style cycles" +optional = false +python-versions = ">=3.8" +files = [ + {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, + {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, +] + +[package.extras] +docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] +tests = ["pytest", "pytest-cov", "pytest-xdist"] + +[[package]] +name = "databricks-agents" +version = "0.10.0" +description = "Mosaic AI Agent Framework SDK" +optional = false +python-versions = "*" +files = [ + {file = "databricks_agents-0.10.0-py3-none-any.whl", hash = "sha256:6c86ddf6f62fef6caf078ffe132fee3f48c7e4f978724da058f4bf64361102b3"}, +] + +[package.dependencies] +databricks-sdk = ">=0.23.0" +jinja2 = ">=3.0.0" +mlflow = ">=2.17.0" +mlflow-skinny = {version = "*", extras = ["databricks"]} +tiktoken = "*" +tqdm = "*" +urllib3 = ">=2.0" + +[[package]] +name = "databricks-connect" +version = "15.1.0" +description = "Databricks Connect Client" +optional = false +python-versions = ">=3.10" +files = [ + {file = "databricks_connect-15.1.0-py2.py3-none-any.whl", hash = "sha256:5032276e08280bd654384f368e96f4a1bca5f17a0bb10e1af9aca37d08fc6038"}, +] + +[package.dependencies] +databricks-sdk = ">=0.29.0" +googleapis-common-protos = ">=1.56.4" +grpcio = ">=1.56.0" +grpcio-status = ">=1.56.0" +numpy = ">=1.15,<2" +packaging = ">=23.2" +pandas = ">=1.0.5" +py4j = "0.10.9.7" +pyarrow = ">=4.0.0" +six = "*" + +[package.extras] +connect = ["googleapis-common-protos (>=1.56.4)", "grpcio (>=1.56.0)", "grpcio-status (>=1.56.0)", "numpy (>=1.15,<2)", "pandas (>=1.0.5)", "pyarrow (>=4.0.0)"] +ml = ["numpy (>=1.15,<2)"] +mllib = ["numpy (>=1.15,<2)"] +pandas-on-spark = ["numpy (>=1.15,<2)", "pandas (>=1.0.5)", "pyarrow (>=4.0.0)"] +sql = ["numpy (>=1.15,<2)", "pandas (>=1.0.5)", "pyarrow (>=4.0.0)"] + +[[package]] +name = "databricks-sdk" +version = "0.36.0" +description = "Databricks SDK for Python (Beta)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "databricks_sdk-0.36.0-py3-none-any.whl", hash = "sha256:e6105a2752c7980de35f7c7e3c4d63389c0763c9ef7bf7e2813e464acef907e9"}, + {file = 
"databricks_sdk-0.36.0.tar.gz", hash = "sha256:d8c46348cbd3e0b56991a6b7a59d7a6e0437947f6387bef832e6fe092e2dd427"}, +] + +[package.dependencies] +google-auth = ">=2.0,<3.0" +httpx = {version = "*", optional = true, markers = "extra == \"openai\""} +langchain-openai = {version = "*", optional = true, markers = "python_version > \"3.7\" and extra == \"openai\""} +openai = {version = "*", optional = true, markers = "extra == \"openai\""} +requests = ">=2.28.1,<3" + +[package.extras] +dev = ["autoflake", "databricks-connect", "httpx", "ipython", "ipywidgets", "isort", "langchain-openai", "openai", "pycodestyle", "pyfakefs", "pytest", "pytest-cov", "pytest-mock", "pytest-rerunfailures", "pytest-xdist", "requests-mock", "wheel", "yapf"] +notebook = ["ipython (>=8,<9)", "ipywidgets (>=8,<9)"] +openai = ["httpx", "langchain-openai", "openai"] + +[[package]] +name = "databricks-vectorsearch" +version = "0.42" +description = "Databricks Vector Search Client" +optional = false +python-versions = ">=3.7" +files = [ + {file = "databricks_vectorsearch-0.42-py3-none-any.whl", hash = "sha256:536f4398abaa7fb52ee7bbd80ab84a4988ca32888ec42cb0aa1b37c0525ec339"}, +] + +[package.dependencies] +deprecation = ">=2" +mlflow-skinny = ">=2.11.3,<3" +protobuf = ">=3.12.0,<5" +requests = ">=2" + +[[package]] +name = "debugpy" +version = "1.8.8" +description = "An implementation of the Debug Adapter Protocol for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "debugpy-1.8.8-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:e59b1607c51b71545cb3496876544f7186a7a27c00b436a62f285603cc68d1c6"}, + {file = "debugpy-1.8.8-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6531d952b565b7cb2fbd1ef5df3d333cf160b44f37547a4e7cf73666aca5d8d"}, + {file = "debugpy-1.8.8-cp310-cp310-win32.whl", hash = "sha256:b01f4a5e5c5fb1d34f4ccba99a20ed01eabc45a4684f4948b5db17a319dfb23f"}, + {file = "debugpy-1.8.8-cp310-cp310-win_amd64.whl", hash = "sha256:535f4fb1c024ddca5913bb0eb17880c8f24ba28aa2c225059db145ee557035e9"}, + {file = "debugpy-1.8.8-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:c399023146e40ae373753a58d1be0a98bf6397fadc737b97ad612886b53df318"}, + {file = "debugpy-1.8.8-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09cc7b162586ea2171eea055985da2702b0723f6f907a423c9b2da5996ad67ba"}, + {file = "debugpy-1.8.8-cp311-cp311-win32.whl", hash = "sha256:eea8821d998ebeb02f0625dd0d76839ddde8cbf8152ebbe289dd7acf2cdc6b98"}, + {file = "debugpy-1.8.8-cp311-cp311-win_amd64.whl", hash = "sha256:d4483836da2a533f4b1454dffc9f668096ac0433de855f0c22cdce8c9f7e10c4"}, + {file = "debugpy-1.8.8-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:0cc94186340be87b9ac5a707184ec8f36547fb66636d1029ff4f1cc020e53996"}, + {file = "debugpy-1.8.8-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64674e95916e53c2e9540a056e5f489e0ad4872645399d778f7c598eacb7b7f9"}, + {file = "debugpy-1.8.8-cp312-cp312-win32.whl", hash = "sha256:5c6e885dbf12015aed73770f29dec7023cb310d0dc2ba8bfbeb5c8e43f80edc9"}, + {file = "debugpy-1.8.8-cp312-cp312-win_amd64.whl", hash = "sha256:19ffbd84e757a6ca0113574d1bf5a2298b3947320a3e9d7d8dc3377f02d9f864"}, + {file = "debugpy-1.8.8-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:705cd123a773d184860ed8dae99becd879dfec361098edbefb5fc0d3683eb804"}, + {file = 
"debugpy-1.8.8-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890fd16803f50aa9cb1a9b9b25b5ec321656dd6b78157c74283de241993d086f"}, + {file = "debugpy-1.8.8-cp313-cp313-win32.whl", hash = "sha256:90244598214bbe704aa47556ec591d2f9869ff9e042e301a2859c57106649add"}, + {file = "debugpy-1.8.8-cp313-cp313-win_amd64.whl", hash = "sha256:4b93e4832fd4a759a0c465c967214ed0c8a6e8914bced63a28ddb0dd8c5f078b"}, + {file = "debugpy-1.8.8-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:143ef07940aeb8e7316de48f5ed9447644da5203726fca378f3a6952a50a9eae"}, + {file = "debugpy-1.8.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f95651bdcbfd3b27a408869a53fbefcc2bcae13b694daee5f1365b1b83a00113"}, + {file = "debugpy-1.8.8-cp38-cp38-win32.whl", hash = "sha256:26b461123a030e82602a750fb24d7801776aa81cd78404e54ab60e8b5fecdad5"}, + {file = "debugpy-1.8.8-cp38-cp38-win_amd64.whl", hash = "sha256:f3cbf1833e644a3100eadb6120f25be8a532035e8245584c4f7532937edc652a"}, + {file = "debugpy-1.8.8-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:53709d4ec586b525724819dc6af1a7703502f7e06f34ded7157f7b1f963bb854"}, + {file = "debugpy-1.8.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a9c013077a3a0000e83d97cf9cc9328d2b0bbb31f56b0e99ea3662d29d7a6a2"}, + {file = "debugpy-1.8.8-cp39-cp39-win32.whl", hash = "sha256:ffe94dd5e9a6739a75f0b85316dc185560db3e97afa6b215628d1b6a17561cb2"}, + {file = "debugpy-1.8.8-cp39-cp39-win_amd64.whl", hash = "sha256:5c0e5a38c7f9b481bf31277d2f74d2109292179081f11108e668195ef926c0f9"}, + {file = "debugpy-1.8.8-py2.py3-none-any.whl", hash = "sha256:ec684553aba5b4066d4de510859922419febc710df7bba04fe9e7ef3de15d34f"}, + {file = "debugpy-1.8.8.zip", hash = "sha256:e6355385db85cbd666be703a96ab7351bc9e6c61d694893206f8001e22aee091"}, +] + +[[package]] +name = "decorator" +version = "5.1.1" +description = "Decorators for Humans" +optional = false +python-versions = ">=3.5" +files = [ + {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, + {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, +] + +[[package]] +name = "deprecated" +version = "1.2.15" +description = "Python @deprecated decorator to deprecate old python classes, functions or methods." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +files = [ + {file = "Deprecated-1.2.15-py2.py3-none-any.whl", hash = "sha256:353bc4a8ac4bfc96800ddab349d89c25dec1079f65fd53acdcc1e0b975b21320"}, + {file = "deprecated-1.2.15.tar.gz", hash = "sha256:683e561a90de76239796e6b6feac66b99030d2dd3fcf61ef996330f14bbb9b0d"}, +] + +[package.dependencies] +wrapt = ">=1.10,<2" + +[package.extras] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "jinja2 (>=3.0.3,<3.1.0)", "setuptools", "sphinx (<2)", "tox"] + +[[package]] +name = "deprecation" +version = "2.1.0" +description = "A library to handle automated deprecations" +optional = false +python-versions = "*" +files = [ + {file = "deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a"}, + {file = "deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff"}, +] + +[package.dependencies] +packaging = "*" + +[[package]] +name = "distro" +version = "1.9.0" +description = "Distro - an OS platform information API" +optional = false +python-versions = ">=3.6" +files = [ + {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, + {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, +] + +[[package]] +name = "docker" +version = "7.1.0" +description = "A Python library for the Docker Engine API." +optional = false +python-versions = ">=3.8" +files = [ + {file = "docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0"}, + {file = "docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c"}, +] + +[package.dependencies] +pywin32 = {version = ">=304", markers = "sys_platform == \"win32\""} +requests = ">=2.26.0" +urllib3 = ">=1.26.0" + +[package.extras] +dev = ["coverage (==7.2.7)", "pytest (==7.4.2)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.1.0)", "ruff (==0.1.8)"] +docs = ["myst-parser (==0.18.0)", "sphinx (==5.1.1)"] +ssh = ["paramiko (>=2.4.3)"] +websockets = ["websocket-client (>=1.3.0)"] + +[[package]] +name = "executing" +version = "2.1.0" +description = "Get the currently executing AST node of a frame, and other information" +optional = false +python-versions = ">=3.8" +files = [ + {file = "executing-2.1.0-py2.py3-none-any.whl", hash = "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf"}, + {file = "executing-2.1.0.tar.gz", hash = "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab"}, +] + +[package.extras] +tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] + +[[package]] +name = "filelock" +version = "3.16.1" +description = "A platform independent file lock." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] +typing = ["typing-extensions (>=4.12.2)"] + +[[package]] +name = "flask" +version = "3.1.0" +description = "A simple framework for building complex web applications." +optional = false +python-versions = ">=3.9" +files = [ + {file = "flask-3.1.0-py3-none-any.whl", hash = "sha256:d667207822eb83f1c4b50949b1623c8fc8d51f2341d65f72e1a1815397551136"}, + {file = "flask-3.1.0.tar.gz", hash = "sha256:5f873c5184c897c8d9d1b05df1e3d01b14910ce69607a117bd3277098a5836ac"}, +] + +[package.dependencies] +blinker = ">=1.9" +click = ">=8.1.3" +itsdangerous = ">=2.2" +Jinja2 = ">=3.1.2" +Werkzeug = ">=3.1" + +[package.extras] +async = ["asgiref (>=3.2)"] +dotenv = ["python-dotenv"] + +[[package]] +name = "fonttools" +version = "4.55.0" +description = "Tools to manipulate font files" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fonttools-4.55.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:51c029d4c0608a21a3d3d169dfc3fb776fde38f00b35ca11fdab63ba10a16f61"}, + {file = "fonttools-4.55.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bca35b4e411362feab28e576ea10f11268b1aeed883b9f22ed05675b1e06ac69"}, + {file = "fonttools-4.55.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ce4ba6981e10f7e0ccff6348e9775ce25ffadbee70c9fd1a3737e3e9f5fa74f"}, + {file = "fonttools-4.55.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31d00f9852a6051dac23294a4cf2df80ced85d1d173a61ba90a3d8f5abc63c60"}, + {file = "fonttools-4.55.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e198e494ca6e11f254bac37a680473a311a88cd40e58f9cc4dc4911dfb686ec6"}, + {file = "fonttools-4.55.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7208856f61770895e79732e1dcbe49d77bd5783adf73ae35f87fcc267df9db81"}, + {file = "fonttools-4.55.0-cp310-cp310-win32.whl", hash = "sha256:e7e6a352ff9e46e8ef8a3b1fe2c4478f8a553e1b5a479f2e899f9dc5f2055880"}, + {file = "fonttools-4.55.0-cp310-cp310-win_amd64.whl", hash = "sha256:636caaeefe586d7c84b5ee0734c1a5ab2dae619dc21c5cf336f304ddb8f6001b"}, + {file = "fonttools-4.55.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fa34aa175c91477485c44ddfbb51827d470011e558dfd5c7309eb31bef19ec51"}, + {file = "fonttools-4.55.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:37dbb3fdc2ef7302d3199fb12468481cbebaee849e4b04bc55b77c24e3c49189"}, + {file = "fonttools-4.55.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5263d8e7ef3c0ae87fbce7f3ec2f546dc898d44a337e95695af2cd5ea21a967"}, + {file = "fonttools-4.55.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f307f6b5bf9e86891213b293e538d292cd1677e06d9faaa4bf9c086ad5f132f6"}, + {file = "fonttools-4.55.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f0a4b52238e7b54f998d6a56b46a2c56b59c74d4f8a6747fb9d4042190f37cd3"}, + {file = "fonttools-4.55.0-cp311-cp311-musllinux_1_2_x86_64.whl", 
hash = "sha256:3e569711464f777a5d4ef522e781dc33f8095ab5efd7548958b36079a9f2f88c"}, + {file = "fonttools-4.55.0-cp311-cp311-win32.whl", hash = "sha256:2b3ab90ec0f7b76c983950ac601b58949f47aca14c3f21eed858b38d7ec42b05"}, + {file = "fonttools-4.55.0-cp311-cp311-win_amd64.whl", hash = "sha256:aa046f6a63bb2ad521004b2769095d4c9480c02c1efa7d7796b37826508980b6"}, + {file = "fonttools-4.55.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:838d2d8870f84fc785528a692e724f2379d5abd3fc9dad4d32f91cf99b41e4a7"}, + {file = "fonttools-4.55.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f46b863d74bab7bb0d395f3b68d3f52a03444964e67ce5c43ce43a75efce9246"}, + {file = "fonttools-4.55.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33b52a9cfe4e658e21b1f669f7309b4067910321757fec53802ca8f6eae96a5a"}, + {file = "fonttools-4.55.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:732a9a63d6ea4a81b1b25a1f2e5e143761b40c2e1b79bb2b68e4893f45139a40"}, + {file = "fonttools-4.55.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7dd91ac3fcb4c491bb4763b820bcab6c41c784111c24172616f02f4bc227c17d"}, + {file = "fonttools-4.55.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1f0e115281a32ff532118aa851ef497a1b7cda617f4621c1cdf81ace3e36fb0c"}, + {file = "fonttools-4.55.0-cp312-cp312-win32.whl", hash = "sha256:6c99b5205844f48a05cb58d4a8110a44d3038c67ed1d79eb733c4953c628b0f6"}, + {file = "fonttools-4.55.0-cp312-cp312-win_amd64.whl", hash = "sha256:f8c8c76037d05652510ae45be1cd8fb5dd2fd9afec92a25374ac82255993d57c"}, + {file = "fonttools-4.55.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8118dc571921dc9e4b288d9cb423ceaf886d195a2e5329cc427df82bba872cd9"}, + {file = "fonttools-4.55.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:01124f2ca6c29fad4132d930da69158d3f49b2350e4a779e1efbe0e82bd63f6c"}, + {file = "fonttools-4.55.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ffd58d2691f11f7c8438796e9f21c374828805d33e83ff4b76e4635633674c"}, + {file = "fonttools-4.55.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5435e5f1eb893c35c2bc2b9cd3c9596b0fcb0a59e7a14121562986dd4c47b8dd"}, + {file = "fonttools-4.55.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d12081729280c39d001edd0f4f06d696014c26e6e9a0a55488fabc37c28945e4"}, + {file = "fonttools-4.55.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a7ad1f1b98ab6cb927ab924a38a8649f1ffd7525c75fe5b594f5dab17af70e18"}, + {file = "fonttools-4.55.0-cp313-cp313-win32.whl", hash = "sha256:abe62987c37630dca69a104266277216de1023cf570c1643bb3a19a9509e7a1b"}, + {file = "fonttools-4.55.0-cp313-cp313-win_amd64.whl", hash = "sha256:2863555ba90b573e4201feaf87a7e71ca3b97c05aa4d63548a4b69ea16c9e998"}, + {file = "fonttools-4.55.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:00f7cf55ad58a57ba421b6a40945b85ac7cc73094fb4949c41171d3619a3a47e"}, + {file = "fonttools-4.55.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f27526042efd6f67bfb0cc2f1610fa20364396f8b1fc5edb9f45bb815fb090b2"}, + {file = "fonttools-4.55.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8e67974326af6a8879dc2a4ec63ab2910a1c1a9680ccd63e4a690950fceddbe"}, + {file = "fonttools-4.55.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61dc0a13451143c5e987dec5254d9d428f3c2789a549a7cf4f815b63b310c1cc"}, + {file = 
"fonttools-4.55.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:b2e526b325a903868c62155a6a7e24df53f6ce4c5c3160214d8fe1be2c41b478"}, + {file = "fonttools-4.55.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b7ef9068a1297714e6fefe5932c33b058aa1d45a2b8be32a4c6dee602ae22b5c"}, + {file = "fonttools-4.55.0-cp38-cp38-win32.whl", hash = "sha256:55718e8071be35dff098976bc249fc243b58efa263768c611be17fe55975d40a"}, + {file = "fonttools-4.55.0-cp38-cp38-win_amd64.whl", hash = "sha256:553bd4f8cc327f310c20158e345e8174c8eed49937fb047a8bda51daf2c353c8"}, + {file = "fonttools-4.55.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f901cef813f7c318b77d1c5c14cf7403bae5cb977cede023e22ba4316f0a8f6"}, + {file = "fonttools-4.55.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8c9679fc0dd7e8a5351d321d8d29a498255e69387590a86b596a45659a39eb0d"}, + {file = "fonttools-4.55.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd2820a8b632f3307ebb0bf57948511c2208e34a4939cf978333bc0a3f11f838"}, + {file = "fonttools-4.55.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23bbbb49bec613a32ed1b43df0f2b172313cee690c2509f1af8fdedcf0a17438"}, + {file = "fonttools-4.55.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a656652e1f5d55b9728937a7e7d509b73d23109cddd4e89ee4f49bde03b736c6"}, + {file = "fonttools-4.55.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f50a1f455902208486fbca47ce33054208a4e437b38da49d6721ce2fef732fcf"}, + {file = "fonttools-4.55.0-cp39-cp39-win32.whl", hash = "sha256:161d1ac54c73d82a3cded44202d0218ab007fde8cf194a23d3dd83f7177a2f03"}, + {file = "fonttools-4.55.0-cp39-cp39-win_amd64.whl", hash = "sha256:ca7fd6987c68414fece41c96836e945e1f320cda56fc96ffdc16e54a44ec57a2"}, + {file = "fonttools-4.55.0-py3-none-any.whl", hash = "sha256:12db5888cd4dd3fcc9f0ee60c6edd3c7e1fd44b7dd0f31381ea03df68f8a153f"}, + {file = "fonttools-4.55.0.tar.gz", hash = "sha256:7636acc6ab733572d5e7eec922b254ead611f1cdad17be3f0be7418e8bfaca71"}, +] + +[package.extras] +all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] +graphite = ["lz4 (>=1.7.4.2)"] +interpolatable = ["munkres", "pycairo", "scipy"] +lxml = ["lxml (>=4.0)"] +pathops = ["skia-pathops (>=0.5.0)"] +plot = ["matplotlib"] +repacker = ["uharfbuzz (>=0.23.0)"] +symfont = ["sympy"] +type1 = ["xattr"] +ufo = ["fs (>=2.2.0,<3)"] +unicode = ["unicodedata2 (>=15.1.0)"] +woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] + +[[package]] +name = "fsspec" +version = "2024.10.0" +description = "File-system specification" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fsspec-2024.10.0-py3-none-any.whl", hash = "sha256:03b9a6785766a4de40368b88906366755e2819e758b83705c88cd7cb5fe81871"}, + {file = "fsspec-2024.10.0.tar.gz", hash = "sha256:eda2d8a4116d4f2429db8550f2457da57279247dd930bb12f821b58391359493"}, +] + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +dev = ["pre-commit", "ruff"] +doc = ["numpydoc", "sphinx", "sphinx-design", "sphinx-rtd-theme", "yarl"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", 
"pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +test = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "requests"] +test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask-expr", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"] +test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"] +tqdm = ["tqdm"] + +[[package]] +name = "gitdb" +version = "4.0.11" +description = "Git Object Database" +optional = false +python-versions = ">=3.7" +files = [ + {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, + {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, +] + +[package.dependencies] +smmap = ">=3.0.1,<6" + +[[package]] +name = "gitpython" +version = "3.1.43" +description = "GitPython is a Python library used to interact with Git repositories" +optional = false +python-versions = ">=3.7" +files = [ + {file = "GitPython-3.1.43-py3-none-any.whl", hash = "sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff"}, + {file = "GitPython-3.1.43.tar.gz", hash = "sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c"}, +] + +[package.dependencies] +gitdb = ">=4.0.1,<5" + +[package.extras] +doc = ["sphinx (==4.3.2)", "sphinx-autodoc-typehints", "sphinx-rtd-theme", "sphinxcontrib-applehelp (>=1.0.2,<=1.0.4)", "sphinxcontrib-devhelp (==1.0.2)", "sphinxcontrib-htmlhelp (>=2.0.0,<=2.0.1)", "sphinxcontrib-qthelp (==1.0.3)", "sphinxcontrib-serializinghtml (==1.1.5)"] +test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions"] + +[[package]] +name = "google-api-core" +version = "2.23.0" +description = "Google API client core library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_api_core-2.23.0-py3-none-any.whl", hash = "sha256:c20100d4c4c41070cf365f1d8ddf5365915291b5eb11b83829fbd1c999b5122f"}, + {file = "google_api_core-2.23.0.tar.gz", hash = "sha256:2ceb087315e6af43f256704b871d99326b1f12a9d6ce99beaedec99ba26a0ace"}, +] + +[package.dependencies] +google-auth = ">=2.14.1,<3.0.dev0" +googleapis-common-protos = ">=1.56.2,<2.0.dev0" +proto-plus = [ + {version = ">=1.25.0,<2.0.0dev", markers = "python_version >= \"3.13\""}, + {version = ">=1.22.3,<2.0.0dev", markers = "python_version < \"3.13\""}, +] +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || 
>4.21.5,<6.0.0.dev0" +requests = ">=2.18.0,<3.0.0.dev0" + +[package.extras] +async-rest = ["google-auth[aiohttp] (>=2.35.0,<3.0.dev0)"] +grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] +grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] +grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] + +[[package]] +name = "google-auth" +version = "2.36.0" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_auth-2.36.0-py2.py3-none-any.whl", hash = "sha256:51a15d47028b66fd36e5c64a82d2d57480075bccc7da37cde257fc94177a61fb"}, + {file = "google_auth-2.36.0.tar.gz", hash = "sha256:545e9618f2df0bcbb7dcbc45a546485b1212624716975a1ea5ae8149ce769ab1"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography", "pyopenssl"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0.dev0)"] + +[[package]] +name = "google-cloud-core" +version = "2.4.1" +description = "Google Cloud API client core library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-cloud-core-2.4.1.tar.gz", hash = "sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073"}, + {file = "google_cloud_core-2.4.1-py2.py3-none-any.whl", hash = "sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61"}, +] + +[package.dependencies] +google-api-core = ">=1.31.6,<2.0.dev0 || >2.3.0,<3.0.0dev" +google-auth = ">=1.25.0,<3.0dev" + +[package.extras] +grpc = ["grpcio (>=1.38.0,<2.0dev)", "grpcio-status (>=1.38.0,<2.0.dev0)"] + +[[package]] +name = "google-cloud-storage" +version = "2.18.2" +description = "Google Cloud Storage API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_cloud_storage-2.18.2-py2.py3-none-any.whl", hash = "sha256:97a4d45c368b7d401ed48c4fdfe86e1e1cb96401c9e199e419d289e2c0370166"}, + {file = "google_cloud_storage-2.18.2.tar.gz", hash = "sha256:aaf7acd70cdad9f274d29332673fcab98708d0e1f4dceb5a5356aaef06af4d99"}, +] + +[package.dependencies] +google-api-core = ">=2.15.0,<3.0.0dev" +google-auth = ">=2.26.1,<3.0dev" +google-cloud-core = ">=2.3.0,<3.0dev" +google-crc32c = ">=1.0,<2.0dev" +google-resumable-media = ">=2.7.2" +requests = ">=2.18.0,<3.0.0dev" + +[package.extras] +protobuf = ["protobuf (<6.0.0dev)"] +tracing = ["opentelemetry-api (>=1.1.0)"] + +[[package]] +name = "google-crc32c" +version = "1.6.0" +description = "A python wrapper of the C library 'Google CRC32C'" +optional = false +python-versions = ">=3.9" +files = [ + {file = "google_crc32c-1.6.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:5bcc90b34df28a4b38653c36bb5ada35671ad105c99cfe915fb5bed7ad6924aa"}, + {file = "google_crc32c-1.6.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:d9e9913f7bd69e093b81da4535ce27af842e7bf371cde42d1ae9e9bd382dc0e9"}, + {file = "google_crc32c-1.6.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a184243544811e4a50d345838a883733461e67578959ac59964e43cca2c791e7"}, + {file = "google_crc32c-1.6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:236c87a46cdf06384f614e9092b82c05f81bd34b80248021f729396a78e55d7e"}, + {file = 
"google_crc32c-1.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebab974b1687509e5c973b5c4b8b146683e101e102e17a86bd196ecaa4d099fc"}, + {file = "google_crc32c-1.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:50cf2a96da226dcbff8671233ecf37bf6e95de98b2a2ebadbfdf455e6d05df42"}, + {file = "google_crc32c-1.6.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:f7a1fc29803712f80879b0806cb83ab24ce62fc8daf0569f2204a0cfd7f68ed4"}, + {file = "google_crc32c-1.6.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:40b05ab32a5067525670880eb5d169529089a26fe35dce8891127aeddc1950e8"}, + {file = "google_crc32c-1.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9e4b426c3702f3cd23b933436487eb34e01e00327fac20c9aebb68ccf34117d"}, + {file = "google_crc32c-1.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51c4f54dd8c6dfeb58d1df5e4f7f97df8abf17a36626a217f169893d1d7f3e9f"}, + {file = "google_crc32c-1.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:bb8b3c75bd157010459b15222c3fd30577042a7060e29d42dabce449c087f2b3"}, + {file = "google_crc32c-1.6.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ed767bf4ba90104c1216b68111613f0d5926fb3780660ea1198fc469af410e9d"}, + {file = "google_crc32c-1.6.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:62f6d4a29fea082ac4a3c9be5e415218255cf11684ac6ef5488eea0c9132689b"}, + {file = "google_crc32c-1.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c87d98c7c4a69066fd31701c4e10d178a648c2cac3452e62c6b24dc51f9fcc00"}, + {file = "google_crc32c-1.6.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd5e7d2445d1a958c266bfa5d04c39932dc54093fa391736dbfdb0f1929c1fb3"}, + {file = "google_crc32c-1.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:7aec8e88a3583515f9e0957fe4f5f6d8d4997e36d0f61624e70469771584c760"}, + {file = "google_crc32c-1.6.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e2806553238cd076f0a55bddab37a532b53580e699ed8e5606d0de1f856b5205"}, + {file = "google_crc32c-1.6.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:bb0966e1c50d0ef5bc743312cc730b533491d60585a9a08f897274e57c3f70e0"}, + {file = "google_crc32c-1.6.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:386122eeaaa76951a8196310432c5b0ef3b53590ef4c317ec7588ec554fec5d2"}, + {file = "google_crc32c-1.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2952396dc604544ea7476b33fe87faedc24d666fb0c2d5ac971a2b9576ab871"}, + {file = "google_crc32c-1.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35834855408429cecf495cac67ccbab802de269e948e27478b1e47dfb6465e57"}, + {file = "google_crc32c-1.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:d8797406499f28b5ef791f339594b0b5fdedf54e203b5066675c406ba69d705c"}, + {file = "google_crc32c-1.6.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48abd62ca76a2cbe034542ed1b6aee851b6f28aaca4e6551b5599b6f3ef175cc"}, + {file = "google_crc32c-1.6.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18e311c64008f1f1379158158bb3f0c8d72635b9eb4f9545f8cf990c5668e59d"}, + {file = "google_crc32c-1.6.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05e2d8c9a2f853ff116db9706b4a27350587f341eda835f46db3c0a8c8ce2f24"}, + {file = "google_crc32c-1.6.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:91ca8145b060679ec9176e6de4f89b07363d6805bd4760631ef254905503598d"}, + {file = "google_crc32c-1.6.0.tar.gz", hash = "sha256:6eceb6ad197656a1ff49ebfbbfa870678c75be4344feb35ac1edf694309413dc"}, +] + +[package.extras] +testing = ["pytest"] + +[[package]] +name = "google-resumable-media" +version = "2.7.2" +description = "Utilities for Google Media Downloads and Resumable Uploads" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_resumable_media-2.7.2-py2.py3-none-any.whl", hash = "sha256:3ce7551e9fe6d99e9a126101d2536612bb73486721951e9562fee0f90c6ababa"}, + {file = "google_resumable_media-2.7.2.tar.gz", hash = "sha256:5280aed4629f2b60b847b0d42f9857fd4935c11af266744df33d8074cae92fe0"}, +] + +[package.dependencies] +google-crc32c = ">=1.0,<2.0dev" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0dev)", "google-auth (>=1.22.0,<2.0dev)"] +requests = ["requests (>=2.18.0,<3.0.0dev)"] + +[[package]] +name = "googleapis-common-protos" +version = "1.66.0" +description = "Common protobufs used in Google APIs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "googleapis_common_protos-1.66.0-py2.py3-none-any.whl", hash = "sha256:d7abcd75fabb2e0ec9f74466401f6c119a0b498e27370e9be4c94cb7e382b8ed"}, + {file = "googleapis_common_protos-1.66.0.tar.gz", hash = "sha256:c3e7b33d15fdca5374cc0a7346dd92ffa847425cc4ea941d970f13680052ec8c"}, +] + +[package.dependencies] +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] + +[[package]] +name = "graphene" +version = "3.4.3" +description = "GraphQL Framework for Python" +optional = false +python-versions = "*" +files = [ + {file = "graphene-3.4.3-py2.py3-none-any.whl", hash = "sha256:820db6289754c181007a150db1f7fff544b94142b556d12e3ebc777a7bf36c71"}, + {file = "graphene-3.4.3.tar.gz", hash = "sha256:2a3786948ce75fe7e078443d37f609cbe5bb36ad8d6b828740ad3b95ed1a0aaa"}, +] + +[package.dependencies] +graphql-core = ">=3.1,<3.3" +graphql-relay = ">=3.1,<3.3" +python-dateutil = ">=2.7.0,<3" +typing-extensions = ">=4.7.1,<5" + +[package.extras] +dev = ["coveralls (>=3.3,<5)", "mypy (>=1.10,<2)", "pytest (>=8,<9)", "pytest-asyncio (>=0.16,<2)", "pytest-benchmark (>=4,<5)", "pytest-cov (>=5,<6)", "pytest-mock (>=3,<4)", "ruff (==0.5.0)", "types-python-dateutil (>=2.8.1,<3)"] +test = ["coveralls (>=3.3,<5)", "pytest (>=8,<9)", "pytest-asyncio (>=0.16,<2)", "pytest-benchmark (>=4,<5)", "pytest-cov (>=5,<6)", "pytest-mock (>=3,<4)"] + +[[package]] +name = "graphql-core" +version = "3.2.5" +description = "GraphQL implementation for Python, a port of GraphQL.js, the JavaScript reference implementation for GraphQL." 
+optional = false +python-versions = "<4,>=3.6" +files = [ + {file = "graphql_core-3.2.5-py3-none-any.whl", hash = "sha256:2f150d5096448aa4f8ab26268567bbfeef823769893b39c1a2e1409590939c8a"}, + {file = "graphql_core-3.2.5.tar.gz", hash = "sha256:e671b90ed653c808715645e3998b7ab67d382d55467b7e2978549111bbabf8d5"}, +] + +[[package]] +name = "graphql-relay" +version = "3.2.0" +description = "Relay library for graphql-core" +optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "graphql-relay-3.2.0.tar.gz", hash = "sha256:1ff1c51298356e481a0be009ccdff249832ce53f30559c1338f22a0e0d17250c"}, + {file = "graphql_relay-3.2.0-py3-none-any.whl", hash = "sha256:c9b22bd28b170ba1fe674c74384a8ff30a76c8e26f88ac3aa1584dd3179953e5"}, +] + +[package.dependencies] +graphql-core = ">=3.2,<3.3" + +[[package]] +name = "greenlet" +version = "3.1.1" +description = "Lightweight in-process concurrent programming" +optional = false +python-versions = ">=3.7" +files = [ + {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6"}, + {file = "greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80"}, + {file = "greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395"}, + {file = "greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39"}, + {file = "greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942"}, + {file = "greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01"}, + {file = "greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c"}, + {file = "greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af"}, + {file = "greenlet-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798"}, + {file = "greenlet-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef"}, + {file = "greenlet-3.1.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7"}, + {file = "greenlet-3.1.1-cp38-cp38-win32.whl", hash = "sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef"}, + {file = "greenlet-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d"}, + {file = "greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e"}, + {file = "greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c"}, + {file = "greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22"}, + {file = "greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467"}, +] + +[package.extras] +docs = ["Sphinx", "furo"] +test = ["objgraph", "psutil"] + +[[package]] +name = "grpcio" +version = "1.68.0" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.8" +files = [ + {file = "grpcio-1.68.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:619b5d0f29f4f5351440e9343224c3e19912c21aeda44e0c49d0d147a8d01544"}, + {file = "grpcio-1.68.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:a59f5822f9459bed098ffbceb2713abbf7c6fd13f2b9243461da5c338d0cd6c3"}, + {file = "grpcio-1.68.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:c03d89df516128febc5a7e760d675b478ba25802447624edf7aa13b1e7b11e2a"}, + {file = "grpcio-1.68.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44bcbebb24363d587472089b89e2ea0ab2e2b4df0e4856ba4c0b087c82412121"}, + {file = "grpcio-1.68.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79f81b7fbfb136247b70465bd836fa1733043fdee539cd6031cb499e9608a110"}, + {file = "grpcio-1.68.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:88fb2925789cfe6daa20900260ef0a1d0a61283dfb2d2fffe6194396a354c618"}, + {file = "grpcio-1.68.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:99f06232b5c9138593ae6f2e355054318717d32a9c09cdc5a2885540835067a1"}, + {file = "grpcio-1.68.0-cp310-cp310-win32.whl", hash = "sha256:a6213d2f7a22c3c30a479fb5e249b6b7e648e17f364598ff64d08a5136fe488b"}, + {file = "grpcio-1.68.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:15327ab81131ef9b94cb9f45b5bd98803a179c7c61205c8c0ac9aff9d6c4e82a"}, + {file = "grpcio-1.68.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:3b2b559beb2d433129441783e5f42e3be40a9e1a89ec906efabf26591c5cd415"}, + {file = "grpcio-1.68.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e46541de8425a4d6829ac6c5d9b16c03c292105fe9ebf78cb1c31e8d242f9155"}, + {file = "grpcio-1.68.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:c1245651f3c9ea92a2db4f95d37b7597db6b246d5892bca6ee8c0e90d76fb73c"}, + {file = "grpcio-1.68.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f1931c7aa85be0fa6cea6af388e576f3bf6baee9e5d481c586980c774debcb4"}, + {file = "grpcio-1.68.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b0ff09c81e3aded7a183bc6473639b46b6caa9c1901d6f5e2cba24b95e59e30"}, + {file = "grpcio-1.68.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8c73f9fbbaee1a132487e31585aa83987ddf626426d703ebcb9a528cf231c9b1"}, + {file = "grpcio-1.68.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6b2f98165ea2790ea159393a2246b56f580d24d7da0d0342c18a085299c40a75"}, + {file = "grpcio-1.68.0-cp311-cp311-win32.whl", hash = "sha256:e1e7ed311afb351ff0d0e583a66fcb39675be112d61e7cfd6c8269884a98afbc"}, + {file = "grpcio-1.68.0-cp311-cp311-win_amd64.whl", hash = "sha256:e0d2f68eaa0a755edd9a47d40e50dba6df2bceda66960dee1218da81a2834d27"}, + {file = "grpcio-1.68.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:8af6137cc4ae8e421690d276e7627cfc726d4293f6607acf9ea7260bd8fc3d7d"}, + {file = "grpcio-1.68.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4028b8e9a3bff6f377698587d642e24bd221810c06579a18420a17688e421af7"}, + {file = "grpcio-1.68.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:f60fa2adf281fd73ae3a50677572521edca34ba373a45b457b5ebe87c2d01e1d"}, + {file = "grpcio-1.68.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e18589e747c1e70b60fab6767ff99b2d0c359ea1db8a2cb524477f93cdbedf5b"}, + {file = "grpcio-1.68.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0d30f3fee9372796f54d3100b31ee70972eaadcc87314be369360248a3dcffe"}, + {file = "grpcio-1.68.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:7e0a3e72c0e9a1acab77bef14a73a416630b7fd2cbd893c0a873edc47c42c8cd"}, + {file = "grpcio-1.68.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a831dcc343440969aaa812004685ed322cdb526cd197112d0db303b0da1e8659"}, + {file = "grpcio-1.68.0-cp312-cp312-win32.whl", hash = "sha256:5a180328e92b9a0050958ced34dddcb86fec5a8b332f5a229e353dafc16cd332"}, + {file = "grpcio-1.68.0-cp312-cp312-win_amd64.whl", hash = "sha256:2bddd04a790b69f7a7385f6a112f46ea0b34c4746f361ebafe9ca0be567c78e9"}, + {file = "grpcio-1.68.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:fc05759ffbd7875e0ff2bd877be1438dfe97c9312bbc558c8284a9afa1d0f40e"}, + {file = "grpcio-1.68.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:15fa1fe25d365a13bc6d52fcac0e3ee1f9baebdde2c9b3b2425f8a4979fccea1"}, + {file = "grpcio-1.68.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:32a9cb4686eb2e89d97022ecb9e1606d132f85c444354c17a7dbde4a455e4a3b"}, + {file = "grpcio-1.68.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dba037ff8d284c8e7ea9a510c8ae0f5b016004f13c3648f72411c464b67ff2fb"}, + {file = "grpcio-1.68.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0efbbd849867e0e569af09e165363ade75cf84f5229b2698d53cf22c7a4f9e21"}, + {file = 
"grpcio-1.68.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:4e300e6978df0b65cc2d100c54e097c10dfc7018b9bd890bbbf08022d47f766d"}, + {file = "grpcio-1.68.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:6f9c7ad1a23e1047f827385f4713b5b8c6c7d325705be1dd3e31fb00dcb2f665"}, + {file = "grpcio-1.68.0-cp313-cp313-win32.whl", hash = "sha256:3ac7f10850fd0487fcce169c3c55509101c3bde2a3b454869639df2176b60a03"}, + {file = "grpcio-1.68.0-cp313-cp313-win_amd64.whl", hash = "sha256:afbf45a62ba85a720491bfe9b2642f8761ff348006f5ef67e4622621f116b04a"}, + {file = "grpcio-1.68.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:f8f695d9576ce836eab27ba7401c60acaf9ef6cf2f70dfe5462055ba3df02cc3"}, + {file = "grpcio-1.68.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9fe1b141cda52f2ca73e17d2d3c6a9f3f3a0c255c216b50ce616e9dca7e3441d"}, + {file = "grpcio-1.68.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:4df81d78fd1646bf94ced4fb4cd0a7fe2e91608089c522ef17bc7db26e64effd"}, + {file = "grpcio-1.68.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:46a2d74d4dd8993151c6cd585594c082abe74112c8e4175ddda4106f2ceb022f"}, + {file = "grpcio-1.68.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a17278d977746472698460c63abf333e1d806bd41f2224f90dbe9460101c9796"}, + {file = "grpcio-1.68.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:15377bce516b1c861c35e18eaa1c280692bf563264836cece693c0f169b48829"}, + {file = "grpcio-1.68.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cc5f0a4f5904b8c25729a0498886b797feb817d1fd3812554ffa39551112c161"}, + {file = "grpcio-1.68.0-cp38-cp38-win32.whl", hash = "sha256:def1a60a111d24376e4b753db39705adbe9483ef4ca4761f825639d884d5da78"}, + {file = "grpcio-1.68.0-cp38-cp38-win_amd64.whl", hash = "sha256:55d3b52fd41ec5772a953612db4e70ae741a6d6ed640c4c89a64f017a1ac02b5"}, + {file = "grpcio-1.68.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:0d230852ba97654453d290e98d6aa61cb48fa5fafb474fb4c4298d8721809354"}, + {file = "grpcio-1.68.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:50992f214264e207e07222703c17d9cfdcc2c46ed5a1ea86843d440148ebbe10"}, + {file = "grpcio-1.68.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:14331e5c27ed3545360464a139ed279aa09db088f6e9502e95ad4bfa852bb116"}, + {file = "grpcio-1.68.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f84890b205692ea813653ece4ac9afa2139eae136e419231b0eec7c39fdbe4c2"}, + {file = "grpcio-1.68.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0cf343c6f4f6aa44863e13ec9ddfe299e0be68f87d68e777328bff785897b05"}, + {file = "grpcio-1.68.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:fd2c2d47969daa0e27eadaf15c13b5e92605c5e5953d23c06d0b5239a2f176d3"}, + {file = "grpcio-1.68.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:18668e36e7f4045820f069997834e94e8275910b1f03e078a6020bd464cb2363"}, + {file = "grpcio-1.68.0-cp39-cp39-win32.whl", hash = "sha256:2af76ab7c427aaa26aa9187c3e3c42f38d3771f91a20f99657d992afada2294a"}, + {file = "grpcio-1.68.0-cp39-cp39-win_amd64.whl", hash = "sha256:e694b5928b7b33ca2d3b4d5f9bf8b5888906f181daff6b406f4938f3a997a490"}, + {file = "grpcio-1.68.0.tar.gz", hash = "sha256:7e7483d39b4a4fddb9906671e9ea21aaad4f031cdfc349fec76bdfa1e404543a"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.68.0)"] + +[[package]] +name = "grpcio-status" +version = "1.62.3" +description = "Status proto mapping for gRPC" +optional = false +python-versions = ">=3.6" +files = [ + {file = 
"grpcio-status-1.62.3.tar.gz", hash = "sha256:289bdd7b2459794a12cf95dc0cb727bd4a1742c37bd823f760236c937e53a485"}, + {file = "grpcio_status-1.62.3-py3-none-any.whl", hash = "sha256:f9049b762ba8de6b1086789d8315846e094edac2c50beaf462338b301a8fd4b8"}, +] + +[package.dependencies] +googleapis-common-protos = ">=1.5.5" +grpcio = ">=1.62.3" +protobuf = ">=4.21.6" + +[[package]] +name = "gunicorn" +version = "23.0.0" +description = "WSGI HTTP Server for UNIX" +optional = false +python-versions = ">=3.7" +files = [ + {file = "gunicorn-23.0.0-py3-none-any.whl", hash = "sha256:ec400d38950de4dfd418cff8328b2c8faed0edb0d517d3394e457c317908ca4d"}, + {file = "gunicorn-23.0.0.tar.gz", hash = "sha256:f014447a0101dc57e294f6c18ca6b40227a4c90e9bdb586042628030cba004ec"}, +] + +[package.dependencies] +packaging = "*" + +[package.extras] +eventlet = ["eventlet (>=0.24.1,!=0.36.0)"] +gevent = ["gevent (>=1.4.0)"] +setproctitle = ["setproctitle"] +testing = ["coverage", "eventlet", "gevent", "pytest", "pytest-cov"] +tornado = ["tornado (>=0.2)"] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "hatchling" +version = "1.26.3" +description = "Modern, extensible Python build backend" +optional = false +python-versions = ">=3.8" +files = [ + {file = "hatchling-1.26.3-py3-none-any.whl", hash = "sha256:c407e1c6c17b574584a66ae60e8e9a01235ecb6dc61d01559bb936577aaf5846"}, + {file = "hatchling-1.26.3.tar.gz", hash = "sha256:b672a9c36a601a06c4e88a1abb1330639ee8e721e0535a37536e546a667efc7a"}, +] + +[package.dependencies] +packaging = ">=24.2" +pathspec = ">=0.10.1" +pluggy = ">=1.0.0" +trove-classifiers = "*" + +[[package]] +name = "httpcore" +version = "1.0.7" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, + {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<1.0)"] + +[[package]] +name = "httpx" +version = "0.27.2" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, + {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "huggingface-hub" +version = "0.26.2" +description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "huggingface_hub-0.26.2-py3-none-any.whl", hash = "sha256:98c2a5a8e786c7b2cb6fdeb2740893cba4d53e312572ed3d8afafda65b128c46"}, + {file = "huggingface_hub-0.26.2.tar.gz", hash = "sha256:b100d853465d965733964d123939ba287da60a547087783ddff8a323f340332b"}, +] + +[package.dependencies] +filelock = "*" +fsspec = ">=2023.5.0" +packaging = ">=20.9" +pyyaml = ">=5.1" +requests = "*" +tqdm = ">=4.42.1" +typing-extensions = ">=3.7.4.3" + +[package.extras] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +cli = ["InquirerPy (==0.3.4)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] +hf-transfer = ["hf-transfer (>=0.1.4)"] +inference = ["aiohttp"] +quality = ["libcst (==1.4.0)", "mypy (==1.5.1)", "ruff (>=0.5.0)"] +tensorflow = ["graphviz", "pydot", "tensorflow"] +tensorflow-testing = ["keras (<3.0)", "tensorflow"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +torch = ["safetensors[torch]", "torch"] +typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff 
(>=0.6.2)"] + +[[package]] +name = "importlib-metadata" +version = "8.5.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, + {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, +] + +[package.dependencies] +zipp = ">=3.20" + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +perf = ["ipython"] +test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "intel-openmp" +version = "2021.4.0" +description = "Intel OpenMP* Runtime Library" +optional = false +python-versions = "*" +files = [ + {file = "intel_openmp-2021.4.0-py2.py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.whl", hash = "sha256:41c01e266a7fdb631a7609191709322da2bbf24b252ba763f125dd651bcc7675"}, + {file = "intel_openmp-2021.4.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:3b921236a38384e2016f0f3d65af6732cf2c12918087128a9163225451e776f2"}, + {file = "intel_openmp-2021.4.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:e2240ab8d01472fed04f3544a878cda5da16c26232b7ea1b59132dbfb48b186e"}, + {file = "intel_openmp-2021.4.0-py2.py3-none-win32.whl", hash = "sha256:6e863d8fd3d7e8ef389d52cf97a50fe2afe1a19247e8c0d168ce021546f96fc9"}, + {file = "intel_openmp-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:eef4c8bcc8acefd7f5cd3b9384dbf73d59e2c99fc56545712ded913f43c4a94f"}, +] + +[[package]] +name = "ipykernel" +version = "6.29.5" +description = "IPython Kernel for Jupyter" +optional = false +python-versions = ">=3.8" +files = [ + {file = "ipykernel-6.29.5-py3-none-any.whl", hash = "sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5"}, + {file = "ipykernel-6.29.5.tar.gz", hash = "sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215"}, +] + +[package.dependencies] +appnope = {version = "*", markers = "platform_system == \"Darwin\""} +comm = ">=0.1.1" +debugpy = ">=1.6.5" +ipython = ">=7.23.1" +jupyter-client = ">=6.1.12" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +matplotlib-inline = ">=0.1" +nest-asyncio = "*" +packaging = "*" +psutil = "*" +pyzmq = ">=24" +tornado = ">=6.1" +traitlets = ">=5.4.0" + +[package.extras] +cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"] +pyqt5 = ["pyqt5"] +pyside6 = ["pyside6"] +test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.23.5)", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "ipython" +version = "8.29.0" 
+description = "IPython: Productive Interactive Computing" +optional = false +python-versions = ">=3.10" +files = [ + {file = "ipython-8.29.0-py3-none-any.whl", hash = "sha256:0188a1bd83267192123ccea7f4a8ed0a78910535dbaa3f37671dca76ebd429c8"}, + {file = "ipython-8.29.0.tar.gz", hash = "sha256:40b60e15b22591450eef73e40a027cf77bd652e757523eebc5bd7c7c498290eb"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +decorator = "*" +jedi = ">=0.16" +matplotlib-inline = "*" +pexpect = {version = ">4.3", markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\""} +prompt-toolkit = ">=3.0.41,<3.1.0" +pygments = ">=2.4.0" +stack-data = "*" +traitlets = ">=5.13.0" +typing-extensions = {version = ">=4.6", markers = "python_version < \"3.12\""} + +[package.extras] +all = ["ipython[black,doc,kernel,matplotlib,nbconvert,nbformat,notebook,parallel,qtconsole]", "ipython[test,test-extra]"] +black = ["black"] +doc = ["docrepr", "exceptiongroup", "intersphinx-registry", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "tomli", "typing-extensions"] +kernel = ["ipykernel"] +matplotlib = ["matplotlib"] +nbconvert = ["nbconvert"] +nbformat = ["nbformat"] +notebook = ["ipywidgets", "notebook"] +parallel = ["ipyparallel"] +qtconsole = ["qtconsole"] +test = ["packaging", "pickleshare", "pytest", "pytest-asyncio (<0.22)", "testpath"] +test-extra = ["curio", "ipython[test]", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.23)", "pandas", "trio"] + +[[package]] +name = "ipywidgets" +version = "8.1.5" +description = "Jupyter interactive widgets" +optional = false +python-versions = ">=3.7" +files = [ + {file = "ipywidgets-8.1.5-py3-none-any.whl", hash = "sha256:3290f526f87ae6e77655555baba4f36681c555b8bdbbff430b70e52c34c86245"}, + {file = "ipywidgets-8.1.5.tar.gz", hash = "sha256:870e43b1a35656a80c18c9503bbf2d16802db1cb487eec6fab27d683381dde17"}, +] + +[package.dependencies] +comm = ">=0.1.3" +ipython = ">=6.1.0" +jupyterlab-widgets = ">=3.0.12,<3.1.0" +traitlets = ">=4.3.1" +widgetsnbextension = ">=4.0.12,<4.1.0" + +[package.extras] +test = ["ipykernel", "jsonschema", "pytest (>=3.6.0)", "pytest-cov", "pytz"] + +[[package]] +name = "isodate" +version = "0.7.2" +description = "An ISO 8601 date/time/duration parser and formatter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "isodate-0.7.2-py3-none-any.whl", hash = "sha256:28009937d8031054830160fce6d409ed342816b543597cece116d966c6d99e15"}, + {file = "isodate-0.7.2.tar.gz", hash = "sha256:4cd1aa0f43ca76f4a6c6c0292a85f40b35ec2e43e315b59f06e6d32171a953e6"}, +] + +[[package]] +name = "itsdangerous" +version = "2.2.0" +description = "Safely pass data to untrusted environments and back." +optional = false +python-versions = ">=3.8" +files = [ + {file = "itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef"}, + {file = "itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173"}, +] + +[[package]] +name = "jedi" +version = "0.19.2" +description = "An autocompletion tool for Python that can be used for text editors." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9"}, + {file = "jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0"}, +] + +[package.dependencies] +parso = ">=0.8.4,<0.9.0" + +[package.extras] +docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["Django", "attrs", "colorama", "docopt", "pytest (<9.0.0)"] + +[[package]] +name = "jinja2" +version = "3.1.4" +description = "A very fast and expressive template engine." +optional = false +python-versions = ">=3.7" +files = [ + {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, + {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jiter" +version = "0.7.1" +description = "Fast iterable JSON parser." +optional = false +python-versions = ">=3.8" +files = [ + {file = "jiter-0.7.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:262e96d06696b673fad6f257e6a0abb6e873dc22818ca0e0600f4a1189eb334f"}, + {file = "jiter-0.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be6de02939aac5be97eb437f45cfd279b1dc9de358b13ea6e040e63a3221c40d"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935f10b802bc1ce2b2f61843e498c7720aa7f4e4bb7797aa8121eab017293c3d"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9cd3cccccabf5064e4bb3099c87bf67db94f805c1e62d1aefd2b7476e90e0ee2"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4aa919ebfc5f7b027cc368fe3964c0015e1963b92e1db382419dadb098a05192"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ae2d01e82c94491ce4d6f461a837f63b6c4e6dd5bb082553a70c509034ff3d4"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f9568cd66dbbdab67ae1b4c99f3f7da1228c5682d65913e3f5f95586b3cb9a9"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9ecbf4e20ec2c26512736284dc1a3f8ed79b6ca7188e3b99032757ad48db97dc"}, + {file = "jiter-0.7.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b1a0508fddc70ce00b872e463b387d49308ef02b0787992ca471c8d4ba1c0fa1"}, + {file = "jiter-0.7.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f84c9996664c460f24213ff1e5881530abd8fafd82058d39af3682d5fd2d6316"}, + {file = "jiter-0.7.1-cp310-none-win32.whl", hash = "sha256:c915e1a1960976ba4dfe06551ea87063b2d5b4d30759012210099e712a414d9f"}, + {file = "jiter-0.7.1-cp310-none-win_amd64.whl", hash 
= "sha256:75bf3b7fdc5c0faa6ffffcf8028a1f974d126bac86d96490d1b51b3210aa0f3f"}, + {file = "jiter-0.7.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ad04a23a91f3d10d69d6c87a5f4471b61c2c5cd6e112e85136594a02043f462c"}, + {file = "jiter-0.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e47a554de88dff701226bb5722b7f1b6bccd0b98f1748459b7e56acac2707a5"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e44fff69c814a2e96a20b4ecee3e2365e9b15cf5fe4e00869d18396daa91dab"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:df0a1d05081541b45743c965436f8b5a1048d6fd726e4a030113a2699a6046ea"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f22cf8f236a645cb6d8ffe2a64edb5d2b66fb148bf7c75eea0cb36d17014a7bc"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da8589f50b728ea4bf22e0632eefa125c8aa9c38ed202a5ee6ca371f05eeb3ff"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f20de711224f2ca2dbb166a8d512f6ff48c9c38cc06b51f796520eb4722cc2ce"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8a9803396032117b85ec8cbf008a54590644a062fedd0425cbdb95e4b2b60479"}, + {file = "jiter-0.7.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3d8bae77c82741032e9d89a4026479061aba6e646de3bf5f2fc1ae2bbd9d06e0"}, + {file = "jiter-0.7.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3dc9939e576bbc68c813fc82f6620353ed68c194c7bcf3d58dc822591ec12490"}, + {file = "jiter-0.7.1-cp311-none-win32.whl", hash = "sha256:f7605d24cd6fab156ec89e7924578e21604feee9c4f1e9da34d8b67f63e54892"}, + {file = "jiter-0.7.1-cp311-none-win_amd64.whl", hash = "sha256:f3ea649e7751a1a29ea5ecc03c4ada0a833846c59c6da75d747899f9b48b7282"}, + {file = "jiter-0.7.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ad36a1155cbd92e7a084a568f7dc6023497df781adf2390c345dd77a120905ca"}, + {file = "jiter-0.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7ba52e6aaed2dc5c81a3d9b5e4ab95b039c4592c66ac973879ba57c3506492bb"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b7de0b6f6728b678540c7927587e23f715284596724be203af952418acb8a2d"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9463b62bd53c2fb85529c700c6a3beb2ee54fde8bef714b150601616dcb184a6"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:627164ec01d28af56e1f549da84caf0fe06da3880ebc7b7ee1ca15df106ae172"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:25d0e5bf64e368b0aa9e0a559c3ab2f9b67e35fe7269e8a0d81f48bbd10e8963"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c244261306f08f8008b3087059601997016549cb8bb23cf4317a4827f07b7d74"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7ded4e4b75b68b843b7cea5cd7c55f738c20e1394c68c2cb10adb655526c5f1b"}, + {file = "jiter-0.7.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:80dae4f1889b9d09e5f4de6b58c490d9c8ce7730e35e0b8643ab62b1538f095c"}, + {file = "jiter-0.7.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5970cf8ec943b51bce7f4b98d2e1ed3ada170c2a789e2db3cb484486591a176a"}, + {file = "jiter-0.7.1-cp312-none-win32.whl", hash = 
"sha256:701d90220d6ecb3125d46853c8ca8a5bc158de8c49af60fd706475a49fee157e"}, + {file = "jiter-0.7.1-cp312-none-win_amd64.whl", hash = "sha256:7824c3ecf9ecf3321c37f4e4d4411aad49c666ee5bc2a937071bdd80917e4533"}, + {file = "jiter-0.7.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:097676a37778ba3c80cb53f34abd6943ceb0848263c21bf423ae98b090f6c6ba"}, + {file = "jiter-0.7.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3298af506d4271257c0a8f48668b0f47048d69351675dd8500f22420d4eec378"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12fd88cfe6067e2199964839c19bd2b422ca3fd792949b8f44bb8a4e7d21946a"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dacca921efcd21939123c8ea8883a54b9fa7f6545c8019ffcf4f762985b6d0c8"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de3674a5fe1f6713a746d25ad9c32cd32fadc824e64b9d6159b3b34fd9134143"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65df9dbae6d67e0788a05b4bad5706ad40f6f911e0137eb416b9eead6ba6f044"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ba9a358d59a0a55cccaa4957e6ae10b1a25ffdabda863c0343c51817610501d"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:576eb0f0c6207e9ede2b11ec01d9c2182973986514f9c60bc3b3b5d5798c8f50"}, + {file = "jiter-0.7.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:e550e29cdf3577d2c970a18f3959e6b8646fd60ef1b0507e5947dc73703b5627"}, + {file = "jiter-0.7.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:81d968dbf3ce0db2e0e4dec6b0a0d5d94f846ee84caf779b07cab49f5325ae43"}, + {file = "jiter-0.7.1-cp313-none-win32.whl", hash = "sha256:f892e547e6e79a1506eb571a676cf2f480a4533675f834e9ae98de84f9b941ac"}, + {file = "jiter-0.7.1-cp313-none-win_amd64.whl", hash = "sha256:0302f0940b1455b2a7fb0409b8d5b31183db70d2b07fd177906d83bf941385d1"}, + {file = "jiter-0.7.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:c65a3ce72b679958b79d556473f192a4dfc5895e8cc1030c9f4e434690906076"}, + {file = "jiter-0.7.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e80052d3db39f9bb8eb86d207a1be3d9ecee5e05fdec31380817f9609ad38e60"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70a497859c4f3f7acd71c8bd89a6f9cf753ebacacf5e3e799138b8e1843084e3"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c1288bc22b9e36854a0536ba83666c3b1fb066b811019d7b682c9cf0269cdf9f"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b096ca72dd38ef35675e1d3b01785874315182243ef7aea9752cb62266ad516f"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8dbbd52c50b605af13dbee1a08373c520e6fcc6b5d32f17738875847fea4e2cd"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af29c5c6eb2517e71ffa15c7ae9509fa5e833ec2a99319ac88cc271eca865519"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f114a4df1e40c03c0efbf974b376ed57756a1141eb27d04baee0680c5af3d424"}, + {file = "jiter-0.7.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:191fbaee7cf46a9dd9b817547bf556facde50f83199d07fc48ebeff4082f9df4"}, + {file = "jiter-0.7.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:0e2b445e5ee627fb4ee6bbceeb486251e60a0c881a8e12398dfdff47c56f0723"}, + {file = "jiter-0.7.1-cp38-none-win32.whl", hash = "sha256:47ac4c3cf8135c83e64755b7276339b26cd3c7ddadf9e67306ace4832b283edf"}, + {file = "jiter-0.7.1-cp38-none-win_amd64.whl", hash = "sha256:60b49c245cd90cde4794f5c30f123ee06ccf42fb8730a019a2870cd005653ebd"}, + {file = "jiter-0.7.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8f212eeacc7203256f526f550d105d8efa24605828382cd7d296b703181ff11d"}, + {file = "jiter-0.7.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d9e247079d88c00e75e297e6cb3a18a039ebcd79fefc43be9ba4eb7fb43eb726"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0aacaa56360139c53dcf352992b0331f4057a0373bbffd43f64ba0c32d2d155"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bc1b55314ca97dbb6c48d9144323896e9c1a25d41c65bcb9550b3e0c270ca560"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f281aae41b47e90deb70e7386558e877a8e62e1693e0086f37d015fa1c102289"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:93c20d2730a84d43f7c0b6fb2579dc54335db742a59cf9776d0b80e99d587382"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e81ccccd8069110e150613496deafa10da2f6ff322a707cbec2b0d52a87b9671"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a7d5e85766eff4c9be481d77e2226b4c259999cb6862ccac5ef6621d3c8dcce"}, + {file = "jiter-0.7.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f52ce5799df5b6975439ecb16b1e879d7655e1685b6e3758c9b1b97696313bfb"}, + {file = "jiter-0.7.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e0c91a0304373fdf97d56f88356a010bba442e6d995eb7773cbe32885b71cdd8"}, + {file = "jiter-0.7.1-cp39-none-win32.whl", hash = "sha256:5c08adf93e41ce2755970e8aa95262298afe2bf58897fb9653c47cd93c3c6cdc"}, + {file = "jiter-0.7.1-cp39-none-win_amd64.whl", hash = "sha256:6592f4067c74176e5f369228fb2995ed01400c9e8e1225fb73417183a5e635f0"}, + {file = "jiter-0.7.1.tar.gz", hash = "sha256:448cf4f74f7363c34cdef26214da527e8eeffd88ba06d0b80b485ad0667baf5d"}, +] + +[[package]] +name = "jmespath" +version = "1.0.1" +description = "JSON Matching Expressions" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, + {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, +] + +[[package]] +name = "joblib" +version = "1.4.2" +description = "Lightweight pipelining with Python functions" +optional = false +python-versions = ">=3.8" +files = [ + {file = "joblib-1.4.2-py3-none-any.whl", hash = "sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6"}, + {file = "joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e"}, +] + +[[package]] +name = "jsonpatch" +version = "1.33" +description = "Apply JSON-Patches (RFC 6902)" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" +files = [ + {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"}, + {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"}, +] 
+ +[package.dependencies] +jsonpointer = ">=1.9" + +[[package]] +name = "jsonpointer" +version = "3.0.0" +description = "Identify specific nodes in a JSON document (RFC 6901)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"}, + {file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"}, +] + +[[package]] +name = "jupyter-client" +version = "8.6.3" +description = "Jupyter protocol implementation and client libraries" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f"}, + {file = "jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419"}, +] + +[package.dependencies] +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +python-dateutil = ">=2.8.2" +pyzmq = ">=23.0" +tornado = ">=6.2" +traitlets = ">=5.3" + +[package.extras] +docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest (<8.2.0)", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"] + +[[package]] +name = "jupyter-core" +version = "5.7.2" +description = "Jupyter core package. A base package on which Jupyter projects rely." +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_core-5.7.2-py3-none-any.whl", hash = "sha256:4f7315d2f6b4bcf2e3e7cb6e46772eba760ae459cd1f59d29eb57b0a01bd7409"}, + {file = "jupyter_core-5.7.2.tar.gz", hash = "sha256:aa5f8d32bbf6b431ac830496da7392035d6f61b4f54872f15c4bd2a9c3f536d9"}, +] + +[package.dependencies] +platformdirs = ">=2.5" +pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""} +traitlets = ">=5.3" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"] +test = ["ipykernel", "pre-commit", "pytest (<8)", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "jupyterlab-widgets" +version = "3.0.13" +description = "Jupyter interactive widgets for JupyterLab" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jupyterlab_widgets-3.0.13-py3-none-any.whl", hash = "sha256:e3cda2c233ce144192f1e29914ad522b2f4c40e77214b0cc97377ca3d323db54"}, + {file = "jupyterlab_widgets-3.0.13.tar.gz", hash = "sha256:a2966d385328c1942b683a8cd96b89b8dd82c8b8f81dda902bb2bc06d46f5bed"}, +] + +[[package]] +name = "kiwisolver" +version = "1.4.7" +description = "A fast implementation of the Cassowary constraint solver" +optional = false +python-versions = ">=3.8" +files = [ + {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8a9c83f75223d5e48b0bc9cb1bf2776cf01563e00ade8775ffe13b0b6e1af3a6"}, + {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58370b1ffbd35407444d57057b57da5d6549d2d854fa30249771775c63b5fe17"}, + {file = "kiwisolver-1.4.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aa0abdf853e09aff551db11fce173e2177d00786c688203f52c87ad7fcd91ef9"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:8d53103597a252fb3ab8b5845af04c7a26d5e7ea8122303dd7a021176a87e8b9"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:88f17c5ffa8e9462fb79f62746428dd57b46eb931698e42e990ad63103f35e6c"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a9ca9c710d598fd75ee5de59d5bda2684d9db36a9f50b6125eaea3969c2599"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4d742cb7af1c28303a51b7a27aaee540e71bb8e24f68c736f6f2ffc82f2bf05"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28c7fea2196bf4c2f8d46a0415c77a1c480cc0724722f23d7410ffe9842c407"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e968b84db54f9d42046cf154e02911e39c0435c9801681e3fc9ce8a3c4130278"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0c18ec74c0472de033e1bebb2911c3c310eef5649133dd0bedf2a169a1b269e5"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8f0ea6da6d393d8b2e187e6a5e3fb81f5862010a40c3945e2c6d12ae45cfb2ad"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f106407dda69ae456dd1227966bf445b157ccc80ba0dff3802bb63f30b74e895"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84ec80df401cfee1457063732d90022f93951944b5b58975d34ab56bb150dfb3"}, + {file = "kiwisolver-1.4.7-cp310-cp310-win32.whl", hash = "sha256:71bb308552200fb2c195e35ef05de12f0c878c07fc91c270eb3d6e41698c3bcc"}, + {file = "kiwisolver-1.4.7-cp310-cp310-win_amd64.whl", hash = "sha256:44756f9fd339de0fb6ee4f8c1696cfd19b2422e0d70b4cefc1cc7f1f64045a8c"}, + {file = "kiwisolver-1.4.7-cp310-cp310-win_arm64.whl", hash = "sha256:78a42513018c41c2ffd262eb676442315cbfe3c44eed82385c2ed043bc63210a"}, + {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d2b0e12a42fb4e72d509fc994713d099cbb15ebf1103545e8a45f14da2dfca54"}, + {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2a8781ac3edc42ea4b90bc23e7d37b665d89423818e26eb6df90698aa2287c95"}, + {file = "kiwisolver-1.4.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:46707a10836894b559e04b0fd143e343945c97fd170d69a2d26d640b4e297935"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef97b8df011141c9b0f6caf23b29379f87dd13183c978a30a3c546d2c47314cb"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab58c12a2cd0fc769089e6d38466c46d7f76aced0a1f54c77652446733d2d02"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:803b8e1459341c1bb56d1c5c010406d5edec8a0713a0945851290a7930679b51"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9a9e8a507420fe35992ee9ecb302dab68550dedc0da9e2880dd88071c5fb052"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18077b53dc3bb490e330669a99920c5e6a496889ae8c63b58fbc57c3d7f33a18"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6af936f79086a89b3680a280c47ea90b4df7047b5bdf3aa5c524bbedddb9e545"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_i686.whl", hash = 
"sha256:3abc5b19d24af4b77d1598a585b8a719beb8569a71568b66f4ebe1fb0449460b"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:933d4de052939d90afbe6e9d5273ae05fb836cc86c15b686edd4b3560cc0ee36"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:65e720d2ab2b53f1f72fb5da5fb477455905ce2c88aaa671ff0a447c2c80e8e3"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3bf1ed55088f214ba6427484c59553123fdd9b218a42bbc8c6496d6754b1e523"}, + {file = "kiwisolver-1.4.7-cp311-cp311-win32.whl", hash = "sha256:4c00336b9dd5ad96d0a558fd18a8b6f711b7449acce4c157e7343ba92dd0cf3d"}, + {file = "kiwisolver-1.4.7-cp311-cp311-win_amd64.whl", hash = "sha256:929e294c1ac1e9f615c62a4e4313ca1823ba37326c164ec720a803287c4c499b"}, + {file = "kiwisolver-1.4.7-cp311-cp311-win_arm64.whl", hash = "sha256:e33e8fbd440c917106b237ef1a2f1449dfbb9b6f6e1ce17c94cd6a1e0d438376"}, + {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:5360cc32706dab3931f738d3079652d20982511f7c0ac5711483e6eab08efff2"}, + {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942216596dc64ddb25adb215c3c783215b23626f8d84e8eff8d6d45c3f29f75a"}, + {file = "kiwisolver-1.4.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:48b571ecd8bae15702e4f22d3ff6a0f13e54d3d00cd25216d5e7f658242065ee"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad42ba922c67c5f219097b28fae965e10045ddf145d2928bfac2eb2e17673640"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:612a10bdae23404a72941a0fc8fa2660c6ea1217c4ce0dbcab8a8f6543ea9e7f"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e838bba3a3bac0fe06d849d29772eb1afb9745a59710762e4ba3f4cb8424483"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:22f499f6157236c19f4bbbd472fa55b063db77a16cd74d49afe28992dff8c258"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693902d433cf585133699972b6d7c42a8b9f8f826ebcaf0132ff55200afc599e"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4e77f2126c3e0b0d055f44513ed349038ac180371ed9b52fe96a32aa071a5107"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:657a05857bda581c3656bfc3b20e353c232e9193eb167766ad2dc58b56504948"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4bfa75a048c056a411f9705856abfc872558e33c055d80af6a380e3658766038"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:34ea1de54beef1c104422d210c47c7d2a4999bdecf42c7b5718fbe59a4cac383"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:90da3b5f694b85231cf93586dad5e90e2d71b9428f9aad96952c99055582f520"}, + {file = "kiwisolver-1.4.7-cp312-cp312-win32.whl", hash = "sha256:18e0cca3e008e17fe9b164b55735a325140a5a35faad8de92dd80265cd5eb80b"}, + {file = "kiwisolver-1.4.7-cp312-cp312-win_amd64.whl", hash = "sha256:58cb20602b18f86f83a5c87d3ee1c766a79c0d452f8def86d925e6c60fbf7bfb"}, + {file = "kiwisolver-1.4.7-cp312-cp312-win_arm64.whl", hash = "sha256:f5a8b53bdc0b3961f8b6125e198617c40aeed638b387913bf1ce78afb1b0be2a"}, + {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:2e6039dcbe79a8e0f044f1c39db1986a1b8071051efba3ee4d74f5b365f5226e"}, + {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a1ecf0ac1c518487d9d23b1cd7139a6a65bc460cd101ab01f1be82ecf09794b6"}, + {file = "kiwisolver-1.4.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7ab9ccab2b5bd5702ab0803676a580fffa2aa178c2badc5557a84cc943fcf750"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f816dd2277f8d63d79f9c8473a79fe54047bc0467754962840782c575522224d"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf8bcc23ceb5a1b624572a1623b9f79d2c3b337c8c455405ef231933a10da379"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dea0bf229319828467d7fca8c7c189780aa9ff679c94539eed7532ebe33ed37c"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c06a4c7cf15ec739ce0e5971b26c93638730090add60e183530d70848ebdd34"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:913983ad2deb14e66d83c28b632fd35ba2b825031f2fa4ca29675e665dfecbe1"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5337ec7809bcd0f424c6b705ecf97941c46279cf5ed92311782c7c9c2026f07f"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c26ed10c4f6fa6ddb329a5120ba3b6db349ca192ae211e882970bfc9d91420b"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c619b101e6de2222c1fcb0531e1b17bbffbe54294bfba43ea0d411d428618c27"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:073a36c8273647592ea332e816e75ef8da5c303236ec0167196793eb1e34657a"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ce6b2b0231bda412463e152fc18335ba32faf4e8c23a754ad50ffa70e4091ee"}, + {file = "kiwisolver-1.4.7-cp313-cp313-win32.whl", hash = "sha256:f4c9aee212bc89d4e13f58be11a56cc8036cabad119259d12ace14b34476fd07"}, + {file = "kiwisolver-1.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:8a3ec5aa8e38fc4c8af308917ce12c536f1c88452ce554027e55b22cbbfbff76"}, + {file = "kiwisolver-1.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:76c8094ac20ec259471ac53e774623eb62e6e1f56cd8690c67ce6ce4fcb05650"}, + {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5d5abf8f8ec1f4e22882273c423e16cae834c36856cac348cfbfa68e01c40f3a"}, + {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aeb3531b196ef6f11776c21674dba836aeea9d5bd1cf630f869e3d90b16cfade"}, + {file = "kiwisolver-1.4.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b7d755065e4e866a8086c9bdada157133ff466476a2ad7861828e17b6026e22c"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08471d4d86cbaec61f86b217dd938a83d85e03785f51121e791a6e6689a3be95"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7bbfcb7165ce3d54a3dfbe731e470f65739c4c1f85bb1018ee912bae139e263b"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d34eb8494bea691a1a450141ebb5385e4b69d38bb8403b5146ad279f4b30fa3"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9242795d174daa40105c1d86aba618e8eab7bf96ba8c3ee614da8302a9f95503"}, + {file 
= "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a0f64a48bb81af7450e641e3fe0b0394d7381e342805479178b3d335d60ca7cf"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8e045731a5416357638d1700927529e2b8ab304811671f665b225f8bf8d8f933"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4322872d5772cae7369f8351da1edf255a604ea7087fe295411397d0cfd9655e"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e1631290ee9271dffe3062d2634c3ecac02c83890ada077d225e081aca8aab89"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:edcfc407e4eb17e037bca59be0e85a2031a2ac87e4fed26d3e9df88b4165f92d"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4d05d81ecb47d11e7f8932bd8b61b720bf0b41199358f3f5e36d38e28f0532c5"}, + {file = "kiwisolver-1.4.7-cp38-cp38-win32.whl", hash = "sha256:b38ac83d5f04b15e515fd86f312479d950d05ce2368d5413d46c088dda7de90a"}, + {file = "kiwisolver-1.4.7-cp38-cp38-win_amd64.whl", hash = "sha256:d83db7cde68459fc803052a55ace60bea2bae361fc3b7a6d5da07e11954e4b09"}, + {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f9362ecfca44c863569d3d3c033dbe8ba452ff8eed6f6b5806382741a1334bd"}, + {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8df2eb9b2bac43ef8b082e06f750350fbbaf2887534a5be97f6cf07b19d9583"}, + {file = "kiwisolver-1.4.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f32d6edbc638cde7652bd690c3e728b25332acbadd7cad670cc4a02558d9c417"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e2e6c39bd7b9372b0be21456caab138e8e69cc0fc1190a9dfa92bd45a1e6e904"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dda56c24d869b1193fcc763f1284b9126550eaf84b88bbc7256e15028f19188a"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79849239c39b5e1fd906556c474d9b0439ea6792b637511f3fe3a41158d89ca8"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5e3bc157fed2a4c02ec468de4ecd12a6e22818d4f09cde2c31ee3226ffbefab2"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3da53da805b71e41053dc670f9a820d1157aae77b6b944e08024d17bcd51ef88"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8705f17dfeb43139a692298cb6637ee2e59c0194538153e83e9ee0c75c2eddde"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:82a5c2f4b87c26bb1a0ef3d16b5c4753434633b83d365cc0ddf2770c93829e3c"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce8be0466f4c0d585cdb6c1e2ed07232221df101a4c6f28821d2aa754ca2d9e2"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:409afdfe1e2e90e6ee7fc896f3df9a7fec8e793e58bfa0d052c8a82f99c37abb"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5b9c3f4ee0b9a439d2415012bd1b1cc2df59e4d6a9939f4d669241d30b414327"}, + {file = "kiwisolver-1.4.7-cp39-cp39-win32.whl", hash = "sha256:a79ae34384df2b615eefca647a2873842ac3b596418032bef9a7283675962644"}, + {file = "kiwisolver-1.4.7-cp39-cp39-win_amd64.whl", hash = "sha256:cf0438b42121a66a3a667de17e779330fc0f20b0d97d59d2f2121e182b0505e4"}, + {file = "kiwisolver-1.4.7-cp39-cp39-win_arm64.whl", hash = 
"sha256:764202cc7e70f767dab49e8df52c7455e8de0df5d858fa801a11aa0d882ccf3f"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:94252291e3fe68001b1dd747b4c0b3be12582839b95ad4d1b641924d68fd4643"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b7dfa3b546da08a9f622bb6becdb14b3e24aaa30adba66749d38f3cc7ea9706"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd3de6481f4ed8b734da5df134cd5a6a64fe32124fe83dde1e5b5f29fe30b1e6"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91b5f9f1205845d488c928e8570dcb62b893372f63b8b6e98b863ebd2368ff2"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40fa14dbd66b8b8f470d5fc79c089a66185619d31645f9b0773b88b19f7223c4"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:eb542fe7933aa09d8d8f9d9097ef37532a7df6497819d16efe4359890a2f417a"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bfa1acfa0c54932d5607e19a2c24646fb4c1ae2694437789129cf099789a3b00"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:eee3ea935c3d227d49b4eb85660ff631556841f6e567f0f7bda972df6c2c9935"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f3160309af4396e0ed04db259c3ccbfdc3621b5559b5453075e5de555e1f3a1b"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a17f6a29cf8935e587cc8a4dbfc8368c55edc645283db0ce9801016f83526c2d"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10849fb2c1ecbfae45a693c070e0320a91b35dd4bcf58172c023b994283a124d"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:ac542bf38a8a4be2dc6b15248d36315ccc65f0743f7b1a76688ffb6b5129a5c2"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8b01aac285f91ca889c800042c35ad3b239e704b150cfd3382adfc9dcc780e39"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48be928f59a1f5c8207154f935334d374e79f2b5d212826307d072595ad76a2e"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f37cfe618a117e50d8c240555331160d73d0411422b59b5ee217843d7b693608"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:599b5c873c63a1f6ed7eead644a8a380cfbdf5db91dcb6f85707aaab213b1674"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:801fa7802e5cfabe3ab0c81a34c323a319b097dfb5004be950482d882f3d7225"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0c6c43471bc764fad4bc99c5c2d6d16a676b1abf844ca7c8702bdae92df01ee0"}, + {file = "kiwisolver-1.4.7.tar.gz", hash = "sha256:9893ff81bd7107f7b685d3017cc6583daadb4fc26e4a888350df530e41980a60"}, +] + +[[package]] +name = "langchain-core" +version = "0.2.43" +description = "Building applications with LLMs through composability" +optional = false +python-versions = "<4.0,>=3.8.1" +files = [ + {file = "langchain_core-0.2.43-py3-none-any.whl", hash = "sha256:619601235113298ebf8252a349754b7c28d3cf7166c7c922da24944b78a9363a"}, 
+ {file = "langchain_core-0.2.43.tar.gz", hash = "sha256:42c2ef6adedb911f4254068b6adc9eb4c4075f6c8cb3d83590d3539a815695f5"}, +] + +[package.dependencies] +jsonpatch = ">=1.33,<2.0" +langsmith = ">=0.1.112,<0.2.0" +packaging = ">=23.2,<25" +pydantic = [ + {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""}, + {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""}, +] +PyYAML = ">=5.3" +tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<9.0.0" +typing-extensions = ">=4.7" + +[[package]] +name = "langchain-openai" +version = "0.1.25" +description = "An integration package connecting OpenAI and LangChain" +optional = false +python-versions = "<4.0,>=3.8.1" +files = [ + {file = "langchain_openai-0.1.25-py3-none-any.whl", hash = "sha256:f0b34a233d0d9cb8fce6006c903e57085c493c4f0e32862b99063b96eaedb109"}, + {file = "langchain_openai-0.1.25.tar.gz", hash = "sha256:eb116f744f820247a72f54313fb7c01524fba0927120d4e899e5e4ab41ad3928"}, +] + +[package.dependencies] +langchain-core = ">=0.2.40,<0.3.0" +openai = ">=1.40.0,<2.0.0" +tiktoken = ">=0.7,<1" + +[[package]] +name = "langchain-text-splitters" +version = "0.2.0" +description = "LangChain text splitting utilities" +optional = false +python-versions = "<4.0,>=3.8.1" +files = [ + {file = "langchain_text_splitters-0.2.0-py3-none-any.whl", hash = "sha256:7b4c6a45f8471630a882b321e138329b6897102a5bc62f4c12be1c0b05bb9199"}, + {file = "langchain_text_splitters-0.2.0.tar.gz", hash = "sha256:b32ab4f7397f7d42c1fa3283fefc2547ba356bd63a68ee9092865e5ad83c82f9"}, +] + +[package.dependencies] +langchain-core = ">=0.2.0,<0.3.0" + +[package.extras] +extended-testing = ["beautifulsoup4 (>=4.12.3,<5.0.0)", "lxml (>=4.9.3,<6.0)"] + +[[package]] +name = "langsmith" +version = "0.1.143" +description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." +optional = false +python-versions = "<4.0,>=3.8.1" +files = [ + {file = "langsmith-0.1.143-py3-none-any.whl", hash = "sha256:ba0d827269e9b03a90fababe41fa3e4e3f833300b95add10184f7e67167dde6f"}, + {file = "langsmith-0.1.143.tar.gz", hash = "sha256:4c5159e5cd84b3f8499433009e72d2076dd2daf6c044ac8a3611b30d0d0161c5"}, +] + +[package.dependencies] +httpx = ">=0.23.0,<1" +orjson = ">=3.9.14,<4.0.0" +pydantic = [ + {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""}, + {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""}, +] +requests = ">=2,<3" +requests-toolbelt = ">=1.0.0,<2.0.0" + +[[package]] +name = "mako" +version = "1.3.6" +description = "A super-fast templating language that borrows the best ideas from the existing templating languages." +optional = false +python-versions = ">=3.8" +files = [ + {file = "Mako-1.3.6-py3-none-any.whl", hash = "sha256:a91198468092a2f1a0de86ca92690fb0cfc43ca90ee17e15d93662b4c04b241a"}, + {file = "mako-1.3.6.tar.gz", hash = "sha256:9ec3a1583713479fae654f83ed9fa8c9a4c16b7bb0daba0e6bbebff50c0d983d"}, +] + +[package.dependencies] +MarkupSafe = ">=0.9.2" + +[package.extras] +babel = ["Babel"] +lingua = ["lingua"] +testing = ["pytest"] + +[[package]] +name = "markdown" +version = "3.7" +description = "Python implementation of John Gruber's Markdown." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "Markdown-3.7-py3-none-any.whl", hash = "sha256:7eb6df5690b81a1d7942992c97fad2938e956e79df20cbc6186e9c3a77b1c803"}, + {file = "markdown-3.7.tar.gz", hash = "sha256:2ae2471477cfd02dbbf038d5d9bc226d40def84b4fe2986e49b59b6b472bbed2"}, +] + +[package.extras] +docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.5)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] +testing = ["coverage", "pyyaml"] + +[[package]] +name = "markdownify" +version = "0.12.1" +description = "Convert HTML to markdown." +optional = false +python-versions = "*" +files = [ + {file = "markdownify-0.12.1-py3-none-any.whl", hash = "sha256:a3805abd8166dbb7b27783c5599d91f54f10d79894b2621404d85b333c7ce561"}, + {file = "markdownify-0.12.1.tar.gz", hash = "sha256:1fb08c618b30e0ee7a31a39b998f44a18fb28ab254f55f4af06b6d35a2179e27"}, +] + +[package.dependencies] +beautifulsoup4 = ">=4.9,<5" +six = ">=1.15,<2" + +[[package]] +name = "markupsafe" +version = "3.0.2" +description = "Safely add untrusted strings to HTML/XML markup." +optional = false +python-versions = ">=3.9" +files = [ + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = 
"MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file 
= "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, +] + +[[package]] +name = "matplotlib" +version = "3.9.2" +description = "Python plotting package" +optional = false +python-versions = ">=3.9" +files = [ + {file = "matplotlib-3.9.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9d78bbc0cbc891ad55b4f39a48c22182e9bdaea7fc0e5dbd364f49f729ca1bbb"}, + {file = "matplotlib-3.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c375cc72229614632c87355366bdf2570c2dac01ac66b8ad048d2dabadf2d0d4"}, + {file = "matplotlib-3.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d94ff717eb2bd0b58fe66380bd8b14ac35f48a98e7c6765117fe67fb7684e64"}, + {file = "matplotlib-3.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab68d50c06938ef28681073327795c5db99bb4666214d2d5f880ed11aeaded66"}, + {file = "matplotlib-3.9.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:65aacf95b62272d568044531e41de26285d54aec8cb859031f511f84bd8b495a"}, + {file = "matplotlib-3.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:3fd595f34aa8a55b7fc8bf9ebea8aa665a84c82d275190a61118d33fbc82ccae"}, + {file = "matplotlib-3.9.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d8dd059447824eec055e829258ab092b56bb0579fc3164fa09c64f3acd478772"}, + {file = "matplotlib-3.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c797dac8bb9c7a3fd3382b16fe8f215b4cf0f22adccea36f1545a6d7be310b41"}, + {file = "matplotlib-3.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d719465db13267bcef19ea8954a971db03b9f48b4647e3860e4bc8e6ed86610f"}, + {file = "matplotlib-3.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8912ef7c2362f7193b5819d17dae8629b34a95c58603d781329712ada83f9447"}, + {file = "matplotlib-3.9.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7741f26a58a240f43bee74965c4882b6c93df3e7eb3de160126d8c8f53a6ae6e"}, + {file = "matplotlib-3.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:ae82a14dab96fbfad7965403c643cafe6515e386de723e498cf3eeb1e0b70cc7"}, + {file = "matplotlib-3.9.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ac43031375a65c3196bee99f6001e7fa5bdfb00ddf43379d3c0609bdca042df9"}, + {file = "matplotlib-3.9.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:be0fc24a5e4531ae4d8e858a1a548c1fe33b176bb13eff7f9d0d38ce5112a27d"}, + {file = "matplotlib-3.9.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf81de2926c2db243c9b2cbc3917619a0fc85796c6ba4e58f541df814bbf83c7"}, + {file = "matplotlib-3.9.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ee45bc4245533111ced13f1f2cace1e7f89d1c793390392a80c139d6cf0e6c"}, + {file = "matplotlib-3.9.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:306c8dfc73239f0e72ac50e5a9cf19cc4e8e331dd0c54f5e69ca8758550f1e1e"}, + {file = "matplotlib-3.9.2-cp312-cp312-win_amd64.whl", hash = "sha256:5413401594cfaff0052f9d8b1aafc6d305b4bd7c4331dccd18f561ff7e1d3bd3"}, + {file = "matplotlib-3.9.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:18128cc08f0d3cfff10b76baa2f296fc28c4607368a8402de61bb3f2eb33c7d9"}, + {file = "matplotlib-3.9.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4876d7d40219e8ae8bb70f9263bcbe5714415acfdf781086601211335e24f8aa"}, + {file = "matplotlib-3.9.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d9f07a80deab4bb0b82858a9e9ad53d1382fd122be8cde11080f4e7dfedb38b"}, + {file = 
"matplotlib-3.9.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7c0410f181a531ec4e93bbc27692f2c71a15c2da16766f5ba9761e7ae518413"}, + {file = "matplotlib-3.9.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:909645cce2dc28b735674ce0931a4ac94e12f5b13f6bb0b5a5e65e7cea2c192b"}, + {file = "matplotlib-3.9.2-cp313-cp313-win_amd64.whl", hash = "sha256:f32c7410c7f246838a77d6d1eff0c0f87f3cb0e7c4247aebea71a6d5a68cab49"}, + {file = "matplotlib-3.9.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:37e51dd1c2db16ede9cfd7b5cabdfc818b2c6397c83f8b10e0e797501c963a03"}, + {file = "matplotlib-3.9.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b82c5045cebcecd8496a4d694d43f9cc84aeeb49fe2133e036b207abe73f4d30"}, + {file = "matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f053c40f94bc51bc03832a41b4f153d83f2062d88c72b5e79997072594e97e51"}, + {file = "matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbe196377a8248972f5cede786d4c5508ed5f5ca4a1e09b44bda889958b33f8c"}, + {file = "matplotlib-3.9.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5816b1e1fe8c192cbc013f8f3e3368ac56fbecf02fb41b8f8559303f24c5015e"}, + {file = "matplotlib-3.9.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:cef2a73d06601437be399908cf13aee74e86932a5ccc6ccdf173408ebc5f6bb2"}, + {file = "matplotlib-3.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e0830e188029c14e891fadd99702fd90d317df294c3298aad682739c5533721a"}, + {file = "matplotlib-3.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ba9c1299c920964e8d3857ba27173b4dbb51ca4bab47ffc2c2ba0eb5e2cbc5"}, + {file = "matplotlib-3.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cd93b91ab47a3616b4d3c42b52f8363b88ca021e340804c6ab2536344fad9ca"}, + {file = "matplotlib-3.9.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6d1ce5ed2aefcdce11904fc5bbea7d9c21fff3d5f543841edf3dea84451a09ea"}, + {file = "matplotlib-3.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:b2696efdc08648536efd4e1601b5fd491fd47f4db97a5fbfd175549a7365c1b2"}, + {file = "matplotlib-3.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:d52a3b618cb1cbb769ce2ee1dcdb333c3ab6e823944e9a2d36e37253815f9556"}, + {file = "matplotlib-3.9.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:039082812cacd6c6bec8e17a9c1e6baca230d4116d522e81e1f63a74d01d2e21"}, + {file = "matplotlib-3.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6758baae2ed64f2331d4fd19be38b7b4eae3ecec210049a26b6a4f3ae1c85dcc"}, + {file = "matplotlib-3.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:050598c2b29e0b9832cde72bcf97627bf00262adbc4a54e2b856426bb2ef0697"}, + {file = "matplotlib-3.9.2.tar.gz", hash = "sha256:96ab43906269ca64a6366934106fa01534454a69e471b7bf3d79083981aaab92"}, +] + +[package.dependencies] +contourpy = ">=1.0.1" +cycler = ">=0.10" +fonttools = ">=4.22.0" +kiwisolver = ">=1.3.1" +numpy = ">=1.23" +packaging = ">=20.0" +pillow = ">=8" +pyparsing = ">=2.3.1" +python-dateutil = ">=2.7" + +[package.extras] +dev = ["meson-python (>=0.13.1)", "numpy (>=1.25)", "pybind11 (>=2.6)", "setuptools (>=64)", "setuptools_scm (>=7)"] + +[[package]] +name = "matplotlib-inline" +version = "0.1.7" +description = "Inline Matplotlib backend for Jupyter" +optional = false +python-versions = ">=3.8" +files = [ + {file = "matplotlib_inline-0.1.7-py3-none-any.whl", hash = 
"sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"}, + {file = "matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90"}, +] + +[package.dependencies] +traitlets = "*" + +[[package]] +name = "mkl" +version = "2021.4.0" +description = "Intel® oneAPI Math Kernel Library" +optional = false +python-versions = "*" +files = [ + {file = "mkl-2021.4.0-py2.py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.whl", hash = "sha256:67460f5cd7e30e405b54d70d1ed3ca78118370b65f7327d495e9c8847705e2fb"}, + {file = "mkl-2021.4.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:636d07d90e68ccc9630c654d47ce9fdeb036bb46e2b193b3a9ac8cfea683cce5"}, + {file = "mkl-2021.4.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:398dbf2b0d12acaf54117a5210e8f191827f373d362d796091d161f610c1ebfb"}, + {file = "mkl-2021.4.0-py2.py3-none-win32.whl", hash = "sha256:439c640b269a5668134e3dcbcea4350459c4a8bc46469669b2d67e07e3d330e8"}, + {file = "mkl-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:ceef3cafce4c009dd25f65d7ad0d833a0fbadc3d8903991ec92351fe5de1e718"}, +] + +[package.dependencies] +intel-openmp = "==2021.*" +tbb = "==2021.*" + +[[package]] +name = "mlflow" +version = "2.18.0" +description = "MLflow is an open source platform for the complete machine learning lifecycle" +optional = false +python-versions = ">=3.9" +files = [ + {file = "mlflow-2.18.0-py3-none-any.whl", hash = "sha256:844a5c26ce8f83bbb5e038a7ce5a47be62edee89ad13ecf98ae2dbf0db9fa58f"}, + {file = "mlflow-2.18.0.tar.gz", hash = "sha256:90f0d04b02e35c0f2fccc88e892e37b84871cb4f766acd3ef904c1c30be63ee3"}, +] + +[package.dependencies] +alembic = "<1.10.0 || >1.10.0,<2" +docker = ">=4.0.0,<8" +Flask = "<4" +graphene = "<4" +gunicorn = {version = "<24", markers = "platform_system != \"Windows\""} +Jinja2 = [ + {version = ">=2.11,<4", markers = "platform_system != \"Windows\""}, + {version = ">=3.0,<4", markers = "platform_system == \"Windows\""}, +] +markdown = ">=3.3,<4" +matplotlib = "<4" +mlflow-skinny = "2.18.0" +numpy = "<3" +pandas = "<3" +pyarrow = ">=4.0.0,<19" +scikit-learn = "<2" +scipy = "<2" +sqlalchemy = ">=1.4.0,<3" +waitress = {version = "<4", markers = "platform_system == \"Windows\""} + +[package.extras] +aliyun-oss = ["aliyunstoreplugin"] +databricks = ["azure-storage-file-datalake (>12)", "boto3 (>1)", "botocore", "google-cloud-storage (>=1.30.0)"] +extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (>=1.30.0)", "kubernetes", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"] +gateway = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] +genai = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] +jfrog = ["mlflow-jfrog-plugin"] +langchain = ["langchain (>=0.1.0,<=0.3.7)"] +mlserver = ["mlserver (>=1.2.0,!=1.3.1)", "mlserver-mlflow (>=1.2.0,!=1.3.1)"] +sqlserver = ["mlflow-dbstore"] +xethub = ["mlflow-xethub"] + +[[package]] +name = "mlflow-skinny" +version = "2.18.0" +description = "MLflow is an open source platform for the complete machine learning lifecycle" +optional = false +python-versions = ">=3.9" +files = [ + {file = "mlflow_skinny-2.18.0-py3-none-any.whl", hash = "sha256:b924730b38cf9a7400737aa3e011c97edf978eed354bb0eb89ccb1f9e42764dc"}, + {file = 
"mlflow_skinny-2.18.0.tar.gz", hash = "sha256:87e83f56c362a520196b2f0292b24efdca7f8b2068a6a6941f2ec9feb9bfd914"}, +] + +[package.dependencies] +azure-storage-file-datalake = {version = ">12", optional = true, markers = "extra == \"databricks\""} +boto3 = {version = ">1", optional = true, markers = "extra == \"databricks\""} +botocore = {version = "*", optional = true, markers = "extra == \"databricks\""} +cachetools = ">=5.0.0,<6" +click = ">=7.0,<9" +cloudpickle = "<4" +databricks-sdk = ">=0.20.0,<1" +gitpython = ">=3.1.9,<4" +google-cloud-storage = {version = ">=1.30.0", optional = true, markers = "extra == \"databricks\""} +importlib-metadata = ">=3.7.0,<4.7.0 || >4.7.0,<9" +opentelemetry-api = ">=1.9.0,<3" +opentelemetry-sdk = ">=1.9.0,<3" +packaging = "<25" +protobuf = ">=3.12.0,<6" +pyyaml = ">=5.1,<7" +requests = ">=2.17.3,<3" +sqlparse = ">=0.4.0,<1" + +[package.extras] +aliyun-oss = ["aliyunstoreplugin"] +databricks = ["azure-storage-file-datalake (>12)", "boto3 (>1)", "botocore", "google-cloud-storage (>=1.30.0)"] +extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (>=1.30.0)", "kubernetes", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"] +gateway = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] +genai = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] +jfrog = ["mlflow-jfrog-plugin"] +langchain = ["langchain (>=0.1.0,<=0.3.7)"] +mlserver = ["mlserver (>=1.2.0,!=1.3.1)", "mlserver-mlflow (>=1.2.0,!=1.3.1)"] +sqlserver = ["mlflow-dbstore"] +xethub = ["mlflow-xethub"] + +[[package]] +name = "mpmath" +version = "1.3.0" +description = "Python library for arbitrary-precision floating-point arithmetic" +optional = false +python-versions = "*" +files = [ + {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, + {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, +] + +[package.extras] +develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] +docs = ["sphinx"] +gmpy = ["gmpy2 (>=2.1.0a4)"] +tests = ["pytest (>=4.6)"] + +[[package]] +name = "nest-asyncio" +version = "1.6.0" +description = "Patch asyncio to allow nested event loops" +optional = false +python-versions = ">=3.5" +files = [ + {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, + {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, +] + +[[package]] +name = "networkx" +version = "3.4.2" +description = "Python package for creating and manipulating graphs and networks" +optional = false +python-versions = ">=3.10" +files = [ + {file = "networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f"}, + {file = "networkx-3.4.2.tar.gz", hash = "sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1"}, +] + +[package.extras] +default = ["matplotlib (>=3.7)", "numpy (>=1.24)", "pandas (>=2.0)", "scipy (>=1.10,!=1.11.0,!=1.11.1)"] +developer = ["changelist (==0.5)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] +doc = ["intersphinx-registry", "myst-nb (>=1.1)", "numpydoc 
(>=1.8.0)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.15)", "sphinx (>=7.3)", "sphinx-gallery (>=0.16)", "texext (>=0.6.7)"] +example = ["cairocffi (>=1.7)", "contextily (>=1.6)", "igraph (>=0.11)", "momepy (>=0.7.2)", "osmnx (>=1.9)", "scikit-learn (>=1.5)", "seaborn (>=0.13)"] +extra = ["lxml (>=4.6)", "pydot (>=3.0.1)", "pygraphviz (>=1.14)", "sympy (>=1.10)"] +test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] + +[[package]] +name = "numpy" +version = "1.26.4" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, + {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, + {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, + {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, + {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, + {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, + {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, + {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, + {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, + {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, +] + +[[package]] +name = "nvidia-cublas-cu12" +version = "12.1.3.1" +description = "CUBLAS native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"}, + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-win_amd64.whl", hash = "sha256:2b964d60e8cf11b5e1073d179d85fa340c120e99b3067558f3cf98dd69d02906"}, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.1.105" +description = "CUDA profiling tools runtime libs." 
+optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"}, + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:bea8236d13a0ac7190bd2919c3e8e6ce1e402104276e6f9694479e48bb0eb2a4"}, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.1.105" +description = "NVRTC native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"}, + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:0a98a522d9ff138b96c010a65e145dc1b4850e9ecb75a0172371793752fd46ed"}, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.1.105" +description = "CUDA Runtime native Libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"}, + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344"}, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "8.9.2.26" +description = "cuDNN runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl", hash = "sha256:5ccb288774fdfb07a7e7025ffec286971c06d8d7b4fb162525334616d7629ff9"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.0.2.54" +description = "CUFFT native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"}, + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-win_amd64.whl", hash = "sha256:d9ac353f78ff89951da4af698f80870b1534ed69993f10a4cf1d96f21357e253"}, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.2.106" +description = "CURAND native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"}, + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-win_amd64.whl", hash = "sha256:75b6b0c574c0037839121317e17fd01f8a69fd2ef8e25853d826fec30bdba74a"}, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.4.5.107" +description = "CUDA solver native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"}, + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-win_amd64.whl", hash = "sha256:74e0c3a24c78612192a74fcd90dd117f1cf21dea4822e66d89e8ea80e3cd2da5"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" +nvidia-cusparse-cu12 = "*" +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.1.0.106" +description = "CUSPARSE native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = 
"sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"}, + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-win_amd64.whl", hash = "sha256:b798237e81b9719373e8fae8d4f091b70a0cf09d9d85c95a557e11df2d8e9a5a"}, +] + +[package.dependencies] +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.20.5" +description = "NVIDIA Collective Communication Library (NCCL) Runtime" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1fc150d5c3250b170b29410ba682384b14581db722b2531b0d8d33c595f33d01"}, + {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:057f6bf9685f75215d0c53bf3ac4a10b3e6578351de307abad9e18a99182af56"}, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.6.77" +description = "Nvidia JIT LTO Library" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:3bf10d85bb1801e9c894c6e197e44dd137d2a0a9e43f8450e9ad13f2df0dd52d"}, + {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9ae346d16203ae4ea513be416495167a0101d33d2d14935aa9c1829a3fb45142"}, + {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-win_amd64.whl", hash = "sha256:410718cd44962bed862a31dd0318620f6f9a8b28a6291967bcfcb446a6516771"}, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.1.105" +description = "NVIDIA Tools Extension" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"}, + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, +] + +[[package]] +name = "openai" +version = "1.54.4" +description = "The official Python library for the openai API" +optional = false +python-versions = ">=3.8" +files = [ + {file = "openai-1.54.4-py3-none-any.whl", hash = "sha256:0d95cef99346bf9b6d7fbf57faf61a673924c3e34fa8af84c9ffe04660673a7e"}, + {file = "openai-1.54.4.tar.gz", hash = "sha256:50f3656e45401c54e973fa05dc29f3f0b0d19348d685b2f7ddb4d92bf7b1b6bf"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +jiter = ">=0.4.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +tqdm = ">4" +typing-extensions = ">=4.11,<5" + +[package.extras] +datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] + +[[package]] +name = "opentelemetry-api" +version = "1.28.2" +description = "OpenTelemetry Python API" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_api-1.28.2-py3-none-any.whl", hash = "sha256:6fcec89e265beb258fe6b1acaaa3c8c705a934bd977b9f534a2b7c0d2d4275a6"}, + {file = "opentelemetry_api-1.28.2.tar.gz", hash = "sha256:ecdc70c7139f17f9b0cf3742d57d7020e3e8315d6cffcdf1a12a905d45b19cc0"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +importlib-metadata = ">=6.0,<=8.5.0" + +[[package]] +name = "opentelemetry-sdk" +version = "1.28.2" +description = "OpenTelemetry Python SDK" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_sdk-1.28.2-py3-none-any.whl", hash = "sha256:93336c129556f1e3ccd21442b94d3521759541521861b2214c499571b85cb71b"}, + {file = "opentelemetry_sdk-1.28.2.tar.gz", hash = "sha256:5fed24c5497e10df30282456fe2910f83377797511de07d14cec0d3e0a1a3110"}, +] + 
+[package.dependencies] +opentelemetry-api = "1.28.2" +opentelemetry-semantic-conventions = "0.49b2" +typing-extensions = ">=3.7.4" + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.49b2" +description = "OpenTelemetry Semantic Conventions" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_semantic_conventions-0.49b2-py3-none-any.whl", hash = "sha256:51e7e1d0daa958782b6c2a8ed05e5f0e7dd0716fc327ac058777b8659649ee54"}, + {file = "opentelemetry_semantic_conventions-0.49b2.tar.gz", hash = "sha256:44e32ce6a5bb8d7c0c617f84b9dc1c8deda1045a07dc16a688cc7cbeab679997"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +opentelemetry-api = "1.28.2" + +[[package]] +name = "orjson" +version = "3.10.11" +description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" +optional = false +python-versions = ">=3.8" +files = [ + {file = "orjson-3.10.11-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6dade64687f2bd7c090281652fe18f1151292d567a9302b34c2dbb92a3872f1f"}, + {file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82f07c550a6ccd2b9290849b22316a609023ed851a87ea888c0456485a7d196a"}, + {file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd9a187742d3ead9df2e49240234d728c67c356516cf4db018833a86f20ec18c"}, + {file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77b0fed6f209d76c1c39f032a70df2d7acf24b1812ca3e6078fd04e8972685a3"}, + {file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:63fc9d5fe1d4e8868f6aae547a7b8ba0a2e592929245fff61d633f4caccdcdd6"}, + {file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65cd3e3bb4fbb4eddc3c1e8dce10dc0b73e808fcb875f9fab40c81903dd9323e"}, + {file = "orjson-3.10.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6f67c570602300c4befbda12d153113b8974a3340fdcf3d6de095ede86c06d92"}, + {file = "orjson-3.10.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1f39728c7f7d766f1f5a769ce4d54b5aaa4c3f92d5b84817053cc9995b977acc"}, + {file = "orjson-3.10.11-cp310-none-win32.whl", hash = "sha256:1789d9db7968d805f3d94aae2c25d04014aae3a2fa65b1443117cd462c6da647"}, + {file = "orjson-3.10.11-cp310-none-win_amd64.whl", hash = "sha256:5576b1e5a53a5ba8f8df81872bb0878a112b3ebb1d392155f00f54dd86c83ff6"}, + {file = "orjson-3.10.11-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1444f9cb7c14055d595de1036f74ecd6ce15f04a715e73f33bb6326c9cef01b6"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdec57fe3b4bdebcc08a946db3365630332dbe575125ff3d80a3272ebd0ddafe"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4eed32f33a0ea6ef36ccc1d37f8d17f28a1d6e8eefae5928f76aff8f1df85e67"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80df27dd8697242b904f4ea54820e2d98d3f51f91e97e358fc13359721233e4b"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:705f03cee0cb797256d54de6695ef219e5bc8c8120b6654dd460848d57a9af3d"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03246774131701de8e7059b2e382597da43144a9a7400f178b2a32feafc54bd5"}, 
+ {file = "orjson-3.10.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8b5759063a6c940a69c728ea70d7c33583991c6982915a839c8da5f957e0103a"}, + {file = "orjson-3.10.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:677f23e32491520eebb19c99bb34675daf5410c449c13416f7f0d93e2cf5f981"}, + {file = "orjson-3.10.11-cp311-none-win32.whl", hash = "sha256:a11225d7b30468dcb099498296ffac36b4673a8398ca30fdaec1e6c20df6aa55"}, + {file = "orjson-3.10.11-cp311-none-win_amd64.whl", hash = "sha256:df8c677df2f9f385fcc85ab859704045fa88d4668bc9991a527c86e710392bec"}, + {file = "orjson-3.10.11-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:360a4e2c0943da7c21505e47cf6bd725588962ff1d739b99b14e2f7f3545ba51"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:496e2cb45de21c369079ef2d662670a4892c81573bcc143c4205cae98282ba97"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7dfa8db55c9792d53c5952900c6a919cfa377b4f4534c7a786484a6a4a350c19"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:51f3382415747e0dbda9dade6f1e1a01a9d37f630d8c9049a8ed0e385b7a90c0"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f35a1b9f50a219f470e0e497ca30b285c9f34948d3c8160d5ad3a755d9299433"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2f3b7c5803138e67028dde33450e054c87e0703afbe730c105f1fcd873496d5"}, + {file = "orjson-3.10.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f91d9eb554310472bd09f5347950b24442600594c2edc1421403d7610a0998fd"}, + {file = "orjson-3.10.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dfbb2d460a855c9744bbc8e36f9c3a997c4b27d842f3d5559ed54326e6911f9b"}, + {file = "orjson-3.10.11-cp312-none-win32.whl", hash = "sha256:d4a62c49c506d4d73f59514986cadebb7e8d186ad510c518f439176cf8d5359d"}, + {file = "orjson-3.10.11-cp312-none-win_amd64.whl", hash = "sha256:f1eec3421a558ff7a9b010a6c7effcfa0ade65327a71bb9b02a1c3b77a247284"}, + {file = "orjson-3.10.11-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c46294faa4e4d0eb73ab68f1a794d2cbf7bab33b1dda2ac2959ffb7c61591899"}, + {file = "orjson-3.10.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52e5834d7d6e58a36846e059d00559cb9ed20410664f3ad156cd2cc239a11230"}, + {file = "orjson-3.10.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2fc947e5350fdce548bfc94f434e8760d5cafa97fb9c495d2fef6757aa02ec0"}, + {file = "orjson-3.10.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0efabbf839388a1dab5b72b5d3baedbd6039ac83f3b55736eb9934ea5494d258"}, + {file = "orjson-3.10.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a3f29634260708c200c4fe148e42b4aae97d7b9fee417fbdd74f8cfc265f15b0"}, + {file = "orjson-3.10.11-cp313-none-win32.whl", hash = "sha256:1a1222ffcee8a09476bbdd5d4f6f33d06d0d6642df2a3d78b7a195ca880d669b"}, + {file = "orjson-3.10.11-cp313-none-win_amd64.whl", hash = "sha256:bc274ac261cc69260913b2d1610760e55d3c0801bb3457ba7b9004420b6b4270"}, + {file = "orjson-3.10.11-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:19b3763e8bbf8ad797df6b6b5e0fc7c843ec2e2fc0621398534e0c6400098f87"}, + {file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1be83a13312e5e58d633580c5eb8d0495ae61f180da2722f20562974188af205"}, + {file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:afacfd1ab81f46dedd7f6001b6d4e8de23396e4884cd3c3436bd05defb1a6446"}, + {file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cb4d0bea56bba596723d73f074c420aec3b2e5d7d30698bc56e6048066bd560c"}, + {file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96ed1de70fcb15d5fed529a656df29f768187628727ee2788344e8a51e1c1350"}, + {file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4bfb30c891b530f3f80e801e3ad82ef150b964e5c38e1fb8482441c69c35c61c"}, + {file = "orjson-3.10.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d496c74fc2b61341e3cefda7eec21b7854c5f672ee350bc55d9a4997a8a95204"}, + {file = "orjson-3.10.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:655a493bac606655db9a47fe94d3d84fc7f3ad766d894197c94ccf0c5408e7d3"}, + {file = "orjson-3.10.11-cp38-none-win32.whl", hash = "sha256:b9546b278c9fb5d45380f4809e11b4dd9844ca7aaf1134024503e134ed226161"}, + {file = "orjson-3.10.11-cp38-none-win_amd64.whl", hash = "sha256:b592597fe551d518f42c5a2eb07422eb475aa8cfdc8c51e6da7054b836b26782"}, + {file = "orjson-3.10.11-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c95f2ecafe709b4e5c733b5e2768ac569bed308623c85806c395d9cca00e08af"}, + {file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80c00d4acded0c51c98754fe8218cb49cb854f0f7eb39ea4641b7f71732d2cb7"}, + {file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:461311b693d3d0a060439aa669c74f3603264d4e7a08faa68c47ae5a863f352d"}, + {file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52ca832f17d86a78cbab86cdc25f8c13756ebe182b6fc1a97d534051c18a08de"}, + {file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c57ea78a753812f528178aa2f1c57da633754c91d2124cb28991dab4c79a54"}, + {file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7fcfc6f7ca046383fb954ba528587e0f9336828b568282b27579c49f8e16aad"}, + {file = "orjson-3.10.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:86b9dd983857970c29e4c71bb3e95ff085c07d3e83e7c46ebe959bac07ebd80b"}, + {file = "orjson-3.10.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4d83f87582d223e54efb2242a79547611ba4ebae3af8bae1e80fa9a0af83bb7f"}, + {file = "orjson-3.10.11-cp39-none-win32.whl", hash = "sha256:9fd0ad1c129bc9beb1154c2655f177620b5beaf9a11e0d10bac63ef3fce96950"}, + {file = "orjson-3.10.11-cp39-none-win_amd64.whl", hash = "sha256:10f416b2a017c8bd17f325fb9dee1fb5cdd7a54e814284896b7c3f2763faa017"}, + {file = "orjson-3.10.11.tar.gz", hash = "sha256:e35b6d730de6384d5b2dab5fd23f0d76fae8bbc8c353c2f78210aa5fa4beb3ef"}, +] + +[[package]] +name = "packaging" +version = "24.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, +] + +[[package]] +name = "pandas" +version = "2.2.3" +description = "Powerful data structures for data analysis, time series, and 
statistics" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, + {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, + {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"}, + {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"}, + {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"}, + {file = 
"pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"}, + {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"}, + {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"}, + {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, + {version = ">=1.23.2", markers = "python_version == \"3.11\""}, +] +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.7" + +[package.extras] +all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", 
"openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] +aws = ["s3fs (>=2022.11.0)"] +clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] +compression = ["zstandard (>=0.19.0)"] +computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] +consortium-standard = ["dataframe-api-compat (>=0.1.7)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] +feather = ["pyarrow (>=10.0.1)"] +fss = ["fsspec (>=2022.11.0)"] +gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] +hdf5 = ["tables (>=3.8.0)"] +html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] +mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] +parquet = ["pyarrow (>=10.0.1)"] +performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] +plot = ["matplotlib (>=3.6.3)"] +postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] +pyarrow = ["pyarrow (>=10.0.1)"] +spss = ["pyreadstat (>=1.2.0)"] +sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.9.2)"] + +[[package]] +name = "parso" +version = "0.8.4" +description = "A Python Parser" +optional = false +python-versions = ">=3.6" +files = [ + {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"}, + {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"}, +] + +[package.extras] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["docopt", "pytest"] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "pexpect" +version = "4.9.0" +description = "Pexpect allows easy control of interactive console applications." 
+optional = false +python-versions = "*" +files = [ + {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, + {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"}, +] + +[package.dependencies] +ptyprocess = ">=0.5" + +[[package]] +name = "pillow" +version = "11.0.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pillow-11.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:6619654954dc4936fcff82db8eb6401d3159ec6be81e33c6000dfd76ae189947"}, + {file = "pillow-11.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b3c5ac4bed7519088103d9450a1107f76308ecf91d6dabc8a33a2fcfb18d0fba"}, + {file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a65149d8ada1055029fcb665452b2814fe7d7082fcb0c5bed6db851cb69b2086"}, + {file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88a58d8ac0cc0e7f3a014509f0455248a76629ca9b604eca7dc5927cc593c5e9"}, + {file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:c26845094b1af3c91852745ae78e3ea47abf3dbcd1cf962f16b9a5fbe3ee8488"}, + {file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:1a61b54f87ab5786b8479f81c4b11f4d61702830354520837f8cc791ebba0f5f"}, + {file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:674629ff60030d144b7bca2b8330225a9b11c482ed408813924619c6f302fdbb"}, + {file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:598b4e238f13276e0008299bd2482003f48158e2b11826862b1eb2ad7c768b97"}, + {file = "pillow-11.0.0-cp310-cp310-win32.whl", hash = "sha256:9a0f748eaa434a41fccf8e1ee7a3eed68af1b690e75328fd7a60af123c193b50"}, + {file = "pillow-11.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a5629742881bcbc1f42e840af185fd4d83a5edeb96475a575f4da50d6ede337c"}, + {file = "pillow-11.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:ee217c198f2e41f184f3869f3e485557296d505b5195c513b2bfe0062dc537f1"}, + {file = "pillow-11.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1c1d72714f429a521d8d2d018badc42414c3077eb187a59579f28e4270b4b0fc"}, + {file = "pillow-11.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:499c3a1b0d6fc8213519e193796eb1a86a1be4b1877d678b30f83fd979811d1a"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8b2351c85d855293a299038e1f89db92a2f35e8d2f783489c6f0b2b5f3fe8a3"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f4dba50cfa56f910241eb7f883c20f1e7b1d8f7d91c750cd0b318bad443f4d5"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5ddbfd761ee00c12ee1be86c9c0683ecf5bb14c9772ddbd782085779a63dd55b"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:45c566eb10b8967d71bf1ab8e4a525e5a93519e29ea071459ce517f6b903d7fa"}, + {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b4fd7bd29610a83a8c9b564d457cf5bd92b4e11e79a4ee4716a63c959699b306"}, + {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cb929ca942d0ec4fac404cbf520ee6cac37bf35be479b970c4ffadf2b6a1cad9"}, + {file = "pillow-11.0.0-cp311-cp311-win32.whl", hash = "sha256:006bcdd307cc47ba43e924099a038cbf9591062e6c50e570819743f5607404f5"}, + {file = "pillow-11.0.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:52a2d8323a465f84faaba5236567d212c3668f2ab53e1c74c15583cf507a0291"}, + {file = "pillow-11.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:16095692a253047fe3ec028e951fa4221a1f3ed3d80c397e83541a3037ff67c9"}, + {file = "pillow-11.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2c0a187a92a1cb5ef2c8ed5412dd8d4334272617f532d4ad4de31e0495bd923"}, + {file = "pillow-11.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:084a07ef0821cfe4858fe86652fffac8e187b6ae677e9906e192aafcc1b69903"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8069c5179902dcdce0be9bfc8235347fdbac249d23bd90514b7a47a72d9fecf4"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f02541ef64077f22bf4924f225c0fd1248c168f86e4b7abdedd87d6ebaceab0f"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:fcb4621042ac4b7865c179bb972ed0da0218a076dc1820ffc48b1d74c1e37fe9"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:00177a63030d612148e659b55ba99527803288cea7c75fb05766ab7981a8c1b7"}, + {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8853a3bf12afddfdf15f57c4b02d7ded92c7a75a5d7331d19f4f9572a89c17e6"}, + {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3107c66e43bda25359d5ef446f59c497de2b5ed4c7fdba0894f8d6cf3822dafc"}, + {file = "pillow-11.0.0-cp312-cp312-win32.whl", hash = "sha256:86510e3f5eca0ab87429dd77fafc04693195eec7fd6a137c389c3eeb4cfb77c6"}, + {file = "pillow-11.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:8ec4a89295cd6cd4d1058a5e6aec6bf51e0eaaf9714774e1bfac7cfc9051db47"}, + {file = "pillow-11.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:27a7860107500d813fcd203b4ea19b04babe79448268403172782754870dac25"}, + {file = "pillow-11.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcd1fb5bb7b07f64c15618c89efcc2cfa3e95f0e3bcdbaf4642509de1942a699"}, + {file = "pillow-11.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0e038b0745997c7dcaae350d35859c9715c71e92ffb7e0f4a8e8a16732150f38"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ae08bd8ffc41aebf578c2af2f9d8749d91f448b3bfd41d7d9ff573d74f2a6b2"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d69bfd8ec3219ae71bcde1f942b728903cad25fafe3100ba2258b973bd2bc1b2"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:61b887f9ddba63ddf62fd02a3ba7add935d053b6dd7d58998c630e6dbade8527"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:c6a660307ca9d4867caa8d9ca2c2658ab685de83792d1876274991adec7b93fa"}, + {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:73e3a0200cdda995c7e43dd47436c1548f87a30bb27fb871f352a22ab8dcf45f"}, + {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fba162b8872d30fea8c52b258a542c5dfd7b235fb5cb352240c8d63b414013eb"}, + {file = "pillow-11.0.0-cp313-cp313-win32.whl", hash = "sha256:f1b82c27e89fffc6da125d5eb0ca6e68017faf5efc078128cfaa42cf5cb38798"}, + {file = "pillow-11.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:8ba470552b48e5835f1d23ecb936bb7f71d206f9dfeee64245f30c3270b994de"}, + {file = "pillow-11.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:846e193e103b41e984ac921b335df59195356ce3f71dcfd155aa79c603873b84"}, + {file = "pillow-11.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = 
"sha256:4ad70c4214f67d7466bea6a08061eba35c01b1b89eaa098040a35272a8efb22b"}, + {file = "pillow-11.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:6ec0d5af64f2e3d64a165f490d96368bb5dea8b8f9ad04487f9ab60dc4bb6003"}, + {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c809a70e43c7977c4a42aefd62f0131823ebf7dd73556fa5d5950f5b354087e2"}, + {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:4b60c9520f7207aaf2e1d94de026682fc227806c6e1f55bba7606d1c94dd623a"}, + {file = "pillow-11.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1e2688958a840c822279fda0086fec1fdab2f95bf2b717b66871c4ad9859d7e8"}, + {file = "pillow-11.0.0-cp313-cp313t-win32.whl", hash = "sha256:607bbe123c74e272e381a8d1957083a9463401f7bd01287f50521ecb05a313f8"}, + {file = "pillow-11.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c39ed17edea3bc69c743a8dd3e9853b7509625c2462532e62baa0732163a904"}, + {file = "pillow-11.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:75acbbeb05b86bc53cbe7b7e6fe00fbcf82ad7c684b3ad82e3d711da9ba287d3"}, + {file = "pillow-11.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2e46773dc9f35a1dd28bd6981332fd7f27bec001a918a72a79b4133cf5291dba"}, + {file = "pillow-11.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2679d2258b7f1192b378e2893a8a0a0ca472234d4c2c0e6bdd3380e8dfa21b6a"}, + {file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eda2616eb2313cbb3eebbe51f19362eb434b18e3bb599466a1ffa76a033fb916"}, + {file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ec184af98a121fb2da42642dea8a29ec80fc3efbaefb86d8fdd2606619045d"}, + {file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:8594f42df584e5b4bb9281799698403f7af489fba84c34d53d1c4bfb71b7c4e7"}, + {file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:c12b5ae868897c7338519c03049a806af85b9b8c237b7d675b8c5e089e4a618e"}, + {file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:70fbbdacd1d271b77b7721fe3cdd2d537bbbd75d29e6300c672ec6bb38d9672f"}, + {file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5178952973e588b3f1360868847334e9e3bf49d19e169bbbdfaf8398002419ae"}, + {file = "pillow-11.0.0-cp39-cp39-win32.whl", hash = "sha256:8c676b587da5673d3c75bd67dd2a8cdfeb282ca38a30f37950511766b26858c4"}, + {file = "pillow-11.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:94f3e1780abb45062287b4614a5bc0874519c86a777d4a7ad34978e86428b8dd"}, + {file = "pillow-11.0.0-cp39-cp39-win_arm64.whl", hash = "sha256:290f2cc809f9da7d6d622550bbf4c1e57518212da51b6a30fe8e0a270a5b78bd"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1187739620f2b365de756ce086fdb3604573337cc28a0d3ac4a01ab6b2d2a6d2"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fbbcb7b57dc9c794843e3d1258c0fbf0f48656d46ffe9e09b63bbd6e8cd5d0a2"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d203af30149ae339ad1b4f710d9844ed8796e97fda23ffbc4cc472968a47d0b"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21a0d3b115009ebb8ac3d2ebec5c2982cc693da935f4ab7bb5c8ebe2f47d36f2"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:73853108f56df97baf2bb8b522f3578221e56f646ba345a372c78326710d3830"}, + {file = 
"pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e58876c91f97b0952eb766123bfef372792ab3f4e3e1f1a2267834c2ab131734"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:224aaa38177597bb179f3ec87eeefcce8e4f85e608025e9cfac60de237ba6316"}, + {file = "pillow-11.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5bd2d3bdb846d757055910f0a59792d33b555800813c3b39ada1829c372ccb06"}, + {file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:375b8dd15a1f5d2feafff536d47e22f69625c1aa92f12b339ec0b2ca40263273"}, + {file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:daffdf51ee5db69a82dd127eabecce20729e21f7a3680cf7cbb23f0829189790"}, + {file = "pillow-11.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7326a1787e3c7b0429659e0a944725e1b03eeaa10edd945a86dead1913383944"}, + {file = "pillow-11.0.0.tar.gz", hash = "sha256:72bacbaf24ac003fea9bff9837d1eedb6088758d41e100c1552930151f677739"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=8.1)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] + +[[package]] +name = "platformdirs" +version = "4.3.6" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "prompt-toolkit" +version = "3.0.48" +description = "Library for building powerful interactive command lines in Python" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e"}, + {file = "prompt_toolkit-3.0.48.tar.gz", hash = "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90"}, +] + +[package.dependencies] +wcwidth = "*" + +[[package]] +name = "proto-plus" +version = "1.25.0" +description = "Beautiful, Pythonic protocol buffers." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "proto_plus-1.25.0-py3-none-any.whl", hash = "sha256:c91fc4a65074ade8e458e95ef8bac34d4008daa7cce4a12d6707066fca648961"}, + {file = "proto_plus-1.25.0.tar.gz", hash = "sha256:fbb17f57f7bd05a68b7707e745e26528b0b3c34e378db91eef93912c54982d91"}, +] + +[package.dependencies] +protobuf = ">=3.19.0,<6.0.0dev" + +[package.extras] +testing = ["google-api-core (>=1.31.5)"] + +[[package]] +name = "protobuf" +version = "4.25.5" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "protobuf-4.25.5-cp310-abi3-win32.whl", hash = "sha256:5e61fd921603f58d2f5acb2806a929b4675f8874ff5f330b7d6f7e2e784bbcd8"}, + {file = "protobuf-4.25.5-cp310-abi3-win_amd64.whl", hash = "sha256:4be0571adcbe712b282a330c6e89eae24281344429ae95c6d85e79e84780f5ea"}, + {file = "protobuf-4.25.5-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:b2fde3d805354df675ea4c7c6338c1aecd254dfc9925e88c6d31a2bcb97eb173"}, + {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:919ad92d9b0310070f8356c24b855c98df2b8bd207ebc1c0c6fcc9ab1e007f3d"}, + {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:fe14e16c22be926d3abfcb500e60cab068baf10b542b8c858fa27e098123e331"}, + {file = "protobuf-4.25.5-cp38-cp38-win32.whl", hash = "sha256:98d8d8aa50de6a2747efd9cceba361c9034050ecce3e09136f90de37ddba66e1"}, + {file = "protobuf-4.25.5-cp38-cp38-win_amd64.whl", hash = "sha256:b0234dd5a03049e4ddd94b93400b67803c823cfc405689688f59b34e0742381a"}, + {file = "protobuf-4.25.5-cp39-cp39-win32.whl", hash = "sha256:abe32aad8561aa7cc94fc7ba4fdef646e576983edb94a73381b03c53728a626f"}, + {file = "protobuf-4.25.5-cp39-cp39-win_amd64.whl", hash = "sha256:7a183f592dc80aa7c8da7ad9e55091c4ffc9497b3054452d629bb85fa27c2a45"}, + {file = "protobuf-4.25.5-py3-none-any.whl", hash = "sha256:0aebecb809cae990f8129ada5ca273d9d670b76d9bfc9b1809f0a9c02b7dbf41"}, + {file = "protobuf-4.25.5.tar.gz", hash = "sha256:7f8249476b4a9473645db7f8ab42b02fe1488cbe5fb72fddd445e0665afd8584"}, +] + +[[package]] +name = "psutil" +version = "6.1.0" +description = "Cross-platform lib for process and system monitoring in Python." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "psutil-6.1.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ff34df86226c0227c52f38b919213157588a678d049688eded74c76c8ba4a5d0"}, + {file = "psutil-6.1.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:c0e0c00aa18ca2d3b2b991643b799a15fc8f0563d2ebb6040f64ce8dc027b942"}, + {file = "psutil-6.1.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:000d1d1ebd634b4efb383f4034437384e44a6d455260aaee2eca1e9c1b55f047"}, + {file = "psutil-6.1.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:5cd2bcdc75b452ba2e10f0e8ecc0b57b827dd5d7aaffbc6821b2a9a242823a76"}, + {file = "psutil-6.1.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:045f00a43c737f960d273a83973b2511430d61f283a44c96bf13a6e829ba8fdc"}, + {file = "psutil-6.1.0-cp27-none-win32.whl", hash = "sha256:9118f27452b70bb1d9ab3198c1f626c2499384935aaf55388211ad982611407e"}, + {file = "psutil-6.1.0-cp27-none-win_amd64.whl", hash = "sha256:a8506f6119cff7015678e2bce904a4da21025cc70ad283a53b099e7620061d85"}, + {file = "psutil-6.1.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6e2dcd475ce8b80522e51d923d10c7871e45f20918e027ab682f94f1c6351688"}, + {file = "psutil-6.1.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0895b8414afafc526712c498bd9de2b063deaac4021a3b3c34566283464aff8e"}, + {file = "psutil-6.1.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dcbfce5d89f1d1f2546a2090f4fcf87c7f669d1d90aacb7d7582addece9fb38"}, + {file = "psutil-6.1.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:498c6979f9c6637ebc3a73b3f87f9eb1ec24e1ce53a7c5173b8508981614a90b"}, + {file = "psutil-6.1.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d905186d647b16755a800e7263d43df08b790d709d575105d419f8b6ef65423a"}, + {file = "psutil-6.1.0-cp36-cp36m-win32.whl", hash = "sha256:6d3fbbc8d23fcdcb500d2c9f94e07b1342df8ed71b948a2649b5cb060a7c94ca"}, + {file = "psutil-6.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:1209036fbd0421afde505a4879dee3b2fd7b1e14fee81c0069807adcbbcca747"}, + {file = "psutil-6.1.0-cp37-abi3-win32.whl", hash = "sha256:1ad45a1f5d0b608253b11508f80940985d1d0c8f6111b5cb637533a0e6ddc13e"}, + {file = "psutil-6.1.0-cp37-abi3-win_amd64.whl", hash = "sha256:a8fb3752b491d246034fa4d279ff076501588ce8cbcdbb62c32fd7a377d996be"}, + {file = "psutil-6.1.0.tar.gz", hash = "sha256:353815f59a7f64cdaca1c0307ee13558a0512f6db064e92fe833784f08539c7a"}, +] + +[package.extras] +dev = ["black", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest-cov", "requests", "rstcheck", "ruff", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "wheel"] +test = ["pytest", "pytest-xdist", "setuptools"] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +description = "Run a subprocess in a pseudo terminal" +optional = false +python-versions = "*" +files = [ + {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, + {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +description = "Safely evaluate AST nodes without side effects" +optional = false +python-versions = "*" +files = [ + {file = "pure_eval-0.2.3-py3-none-any.whl", hash = 
"sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0"}, + {file = "pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42"}, +] + +[package.extras] +tests = ["pytest"] + +[[package]] +name = "py4j" +version = "0.10.9.7" +description = "Enables Python programs to dynamically access arbitrary Java objects" +optional = false +python-versions = "*" +files = [ + {file = "py4j-0.10.9.7-py2.py3-none-any.whl", hash = "sha256:85defdfd2b2376eb3abf5ca6474b51ab7e0de341c75a02f46dc9b5976f5a5c1b"}, + {file = "py4j-0.10.9.7.tar.gz", hash = "sha256:0b6e5315bb3ada5cf62ac651d107bb2ebc02def3dee9d9548e3baac644ea8dbb"}, +] + +[[package]] +name = "pyarrow" +version = "18.0.0" +description = "Python library for Apache Arrow" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pyarrow-18.0.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:2333f93260674e185cfbf208d2da3007132572e56871f451ba1a556b45dae6e2"}, + {file = "pyarrow-18.0.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:4c381857754da44326f3a49b8b199f7f87a51c2faacd5114352fc78de30d3aba"}, + {file = "pyarrow-18.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:603cd8ad4976568954598ef0a6d4ed3dfb78aff3d57fa8d6271f470f0ce7d34f"}, + {file = "pyarrow-18.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58a62549a3e0bc9e03df32f350e10e1efb94ec6cf63e3920c3385b26663948ce"}, + {file = "pyarrow-18.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:bc97316840a349485fbb137eb8d0f4d7057e1b2c1272b1a20eebbbe1848f5122"}, + {file = "pyarrow-18.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:2e549a748fa8b8715e734919923f69318c953e077e9c02140ada13e59d043310"}, + {file = "pyarrow-18.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:606e9a3dcb0f52307c5040698ea962685fb1c852d72379ee9412be7de9c5f9e2"}, + {file = "pyarrow-18.0.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d5795e37c0a33baa618c5e054cd61f586cf76850a251e2b21355e4085def6280"}, + {file = "pyarrow-18.0.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:5f0510608ccd6e7f02ca8596962afb8c6cc84c453e7be0da4d85f5f4f7b0328a"}, + {file = "pyarrow-18.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:616ea2826c03c16e87f517c46296621a7c51e30400f6d0a61be645f203aa2b93"}, + {file = "pyarrow-18.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1824f5b029ddd289919f354bc285992cb4e32da518758c136271cf66046ef22"}, + {file = "pyarrow-18.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:6dd1b52d0d58dd8f685ced9971eb49f697d753aa7912f0a8f50833c7a7426319"}, + {file = "pyarrow-18.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:320ae9bd45ad7ecc12ec858b3e8e462578de060832b98fc4d671dee9f10d9954"}, + {file = "pyarrow-18.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:2c992716cffb1088414f2b478f7af0175fd0a76fea80841b1706baa8fb0ebaad"}, + {file = "pyarrow-18.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:e7ab04f272f98ebffd2a0661e4e126036f6936391ba2889ed2d44c5006237802"}, + {file = "pyarrow-18.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:03f40b65a43be159d2f97fd64dc998f769d0995a50c00f07aab58b0b3da87e1f"}, + {file = "pyarrow-18.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be08af84808dff63a76860847c48ec0416928a7b3a17c2f49a072cac7c45efbd"}, + {file = "pyarrow-18.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8c70c1965cde991b711a98448ccda3486f2a336457cf4ec4dca257a926e149c9"}, + {file = "pyarrow-18.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:00178509f379415a3fcf855af020e3340254f990a8534294ec3cf674d6e255fd"}, + {file = "pyarrow-18.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:a71ab0589a63a3e987beb2bc172e05f000a5c5be2636b4b263c44034e215b5d7"}, + {file = "pyarrow-18.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe92efcdbfa0bcf2fa602e466d7f2905500f33f09eb90bf0bcf2e6ca41b574c8"}, + {file = "pyarrow-18.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:907ee0aa8ca576f5e0cdc20b5aeb2ad4d3953a3b4769fc4b499e00ef0266f02f"}, + {file = "pyarrow-18.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:66dcc216ebae2eb4c37b223feaf82f15b69d502821dde2da138ec5a3716e7463"}, + {file = "pyarrow-18.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc1daf7c425f58527900876354390ee41b0ae962a73ad0959b9d829def583bb1"}, + {file = "pyarrow-18.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:871b292d4b696b09120ed5bde894f79ee2a5f109cb84470546471df264cae136"}, + {file = "pyarrow-18.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:082ba62bdcb939824ba1ce10b8acef5ab621da1f4c4805e07bfd153617ac19d4"}, + {file = "pyarrow-18.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:2c664ab88b9766413197733c1720d3dcd4190e8fa3bbdc3710384630a0a7207b"}, + {file = "pyarrow-18.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:dc892be34dbd058e8d189b47db1e33a227d965ea8805a235c8a7286f7fd17d3a"}, + {file = "pyarrow-18.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:28f9c39a56d2c78bf6b87dcc699d520ab850919d4a8c7418cd20eda49874a2ea"}, + {file = "pyarrow-18.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:f1a198a50c409ab2d009fbf20956ace84567d67f2c5701511d4dd561fae6f32e"}, + {file = "pyarrow-18.0.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5bd7fd32e3ace012d43925ea4fc8bd1b02cc6cc1e9813b518302950e89b5a22"}, + {file = "pyarrow-18.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:336addb8b6f5208be1b2398442c703a710b6b937b1a046065ee4db65e782ff5a"}, + {file = "pyarrow-18.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:45476490dd4adec5472c92b4d253e245258745d0ccaabe706f8d03288ed60a79"}, + {file = "pyarrow-18.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:b46591222c864e7da7faa3b19455196416cd8355ff6c2cc2e65726a760a3c420"}, + {file = "pyarrow-18.0.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:eb7e3abcda7e1e6b83c2dc2909c8d045881017270a119cc6ee7fdcfe71d02df8"}, + {file = "pyarrow-18.0.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:09f30690b99ce34e0da64d20dab372ee54431745e4efb78ac938234a282d15f9"}, + {file = "pyarrow-18.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d5ca5d707e158540312e09fd907f9f49bacbe779ab5236d9699ced14d2293b8"}, + {file = "pyarrow-18.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6331f280c6e4521c69b201a42dd978f60f7e129511a55da9e0bfe426b4ebb8d"}, + {file = "pyarrow-18.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:3ac24b2be732e78a5a3ac0b3aa870d73766dd00beba6e015ea2ea7394f8b4e55"}, + {file = "pyarrow-18.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b30a927c6dff89ee702686596f27c25160dd6c99be5bcc1513a763ae5b1bfc03"}, + {file = "pyarrow-18.0.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:8f40ec677e942374e3d7f2fad6a67a4c2811a8b975e8703c6fd26d3b168a90e2"}, + {file = "pyarrow-18.0.0.tar.gz", hash = "sha256:a6aa027b1a9d2970cf328ccd6dbe4a996bc13c39fd427f502782f5bdb9ca20f5"}, +] + +[package.extras] +test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] + +[[package]] +name = "pyasn1" +version = "0.6.1" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.1" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, + {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, +] + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.7.0" + +[[package]] +name = "pycparser" +version = "2.22" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, +] + +[[package]] +name = "pydantic" +version = "2.9.2" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, + {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.23.4" +typing-extensions = [ + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, +] + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] + +[[package]] +name = "pydantic-core" +version = "2.23.4" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, + {file = 
"pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, + {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, + {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"}, + {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"}, + {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"}, + {file = 
"pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"}, + {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"}, + {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, + {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, + {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"}, + {file = 
"pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"}, + {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"}, + {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"}, + {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"}, + {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"}, + {file = 
"pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"}, + {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pygments" +version = "2.18.0" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, + {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, +] + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pymupdf" +version = "1.24.13" +description = "A high performance Python library for data extraction, analysis, conversion & manipulation of PDF (and other) documents." 
+optional = false +python-versions = ">=3.9" +files = [ + {file = "PyMuPDF-1.24.13-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c11bb9198af69d490b4b346421db827d875a28fbc760d239e691d4b3ed12b5ad"}, + {file = "PyMuPDF-1.24.13-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:240d5c43daa9278db50d609162b48f673ab256d7e5c73eea67af517c1fc2d47c"}, + {file = "PyMuPDF-1.24.13-cp39-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e4c8808e62afbbde0f7b9c4151c4b1a5735911c2d39c34332860df600dba76f8"}, + {file = "PyMuPDF-1.24.13-cp39-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c830610e4fde237fcf0532f1f8c1381453f48c164a5eadd0c6e5fd0bea1ca8e3"}, + {file = "PyMuPDF-1.24.13-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4520558580ac6b5a7164fda29fbc14e39d3114fd803420721500edbf47d04872"}, + {file = "PyMuPDF-1.24.13-cp39-abi3-win32.whl", hash = "sha256:ab22828d4fc205791ef1332a64893cbfc38cd9c331c5f46ae4537372ffee6fc1"}, + {file = "PyMuPDF-1.24.13-cp39-abi3-win_amd64.whl", hash = "sha256:ec17914e4a560f4070212a2e84db5cc8b561d85d1ead193605a22f9561b03148"}, + {file = "PyMuPDF-1.24.13.tar.gz", hash = "sha256:6ec3ab3c6d5cba60bfcf58daaa2d1a5b700b0366ce52be666445007351461fa4"}, +] + +[[package]] +name = "pymupdf4llm" +version = "0.0.5" +description = "PyMuPDF Utilities for LLM/RAG" +optional = false +python-versions = "*" +files = [ + {file = "pymupdf4llm-0.0.5-py3-none-any.whl", hash = "sha256:9882e42789dcefbad25c8e570d9c8d72eaf34e6a1f16ef3f555faf5f7718654f"}, + {file = "pymupdf4llm-0.0.5.tar.gz", hash = "sha256:3256dbc5feec8ec3149586e2e2688f3f47fa733395c33e21a7af15f9b7531689"}, +] + +[package.dependencies] +pymupdf = ">=1.24.2" + +[[package]] +name = "pypandoc-binary" +version = "1.13" +description = "Thin wrapper for pandoc." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "pypandoc_binary-1.13-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:946666388eb79b307d7f497b3b33045ef807750f8e5ef3440e0ba3bbab698044"}, + {file = "pypandoc_binary-1.13-py3-none-macosx_11_0_arm64.whl", hash = "sha256:21ef0345726d36fc45a50211320614daf2caede684b0d0963ce8738292809746"}, + {file = "pypandoc_binary-1.13-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67c0c7af811bcf3cd4f3221be756a4975ec35b2d7df89d8de4313a8caa2cd54f"}, + {file = "pypandoc_binary-1.13-py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9455fdd9521cbf4b56d79a56b806afa94c8c22f3c8ef878536e58d941a70f6d6"}, + {file = "pypandoc_binary-1.13-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:2915f52e4632bd2d0a8fcd2f7e7dfc2ea19b4e1a280fcbc2ddcd142713c4ff12"}, + {file = "pypandoc_binary-1.13-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:11a2497320eb3dccb74de3c67b6df3e5d3f66cdc2a36a67e9a871708f7e48412"}, + {file = "pypandoc_binary-1.13-py3-none-win_amd64.whl", hash = "sha256:3881aa7c84faec2007c0ae4466d3a1cfc93171206b8540f2defa8ea971bf6fd6"}, +] + +[[package]] +name = "pyparsing" +version = "3.2.0" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pyparsing-3.2.0-py3-none-any.whl", hash = "sha256:93d9577b88da0bbea8cc8334ee8b918ed014968fd2ec383e868fb8afb1ccef84"}, + {file = "pyparsing-3.2.0.tar.gz", hash = "sha256:cbf74e27246d595d9a74b186b810f6fbb86726dbf3b9532efb343f6d7294fe9c"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + +[[package]] +name = "pytest" +version = "8.3.3" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, + {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pytz" +version = "2024.2" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, + {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, +] + +[[package]] +name = "pywin32" +version = "308" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +files = [ + {file = 
"pywin32-308-cp310-cp310-win32.whl", hash = "sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e"}, + {file = "pywin32-308-cp310-cp310-win_amd64.whl", hash = "sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e"}, + {file = "pywin32-308-cp310-cp310-win_arm64.whl", hash = "sha256:a5ab5381813b40f264fa3495b98af850098f814a25a63589a8e9eb12560f450c"}, + {file = "pywin32-308-cp311-cp311-win32.whl", hash = "sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a"}, + {file = "pywin32-308-cp311-cp311-win_amd64.whl", hash = "sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b"}, + {file = "pywin32-308-cp311-cp311-win_arm64.whl", hash = "sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6"}, + {file = "pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897"}, + {file = "pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47"}, + {file = "pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091"}, + {file = "pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed"}, + {file = "pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4"}, + {file = "pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd"}, + {file = "pywin32-308-cp37-cp37m-win32.whl", hash = "sha256:1f696ab352a2ddd63bd07430080dd598e6369152ea13a25ebcdd2f503a38f1ff"}, + {file = "pywin32-308-cp37-cp37m-win_amd64.whl", hash = "sha256:13dcb914ed4347019fbec6697a01a0aec61019c1046c2b905410d197856326a6"}, + {file = "pywin32-308-cp38-cp38-win32.whl", hash = "sha256:5794e764ebcabf4ff08c555b31bd348c9025929371763b2183172ff4708152f0"}, + {file = "pywin32-308-cp38-cp38-win_amd64.whl", hash = "sha256:3b92622e29d651c6b783e368ba7d6722b1634b8e70bd376fd7610fe1992e19de"}, + {file = "pywin32-308-cp39-cp39-win32.whl", hash = "sha256:7873ca4dc60ab3287919881a7d4f88baee4a6e639aa6962de25a98ba6b193341"}, + {file = "pywin32-308-cp39-cp39-win_amd64.whl", hash = "sha256:71b3322d949b4cc20776436a9c9ba0eeedcbc9c650daa536df63f0ff111bb920"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = 
"PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "pyzmq" +version = "26.2.0" +description = "Python bindings for 0MQ" +optional = false +python-versions = ">=3.7" +files = [ + {file = 
"pyzmq-26.2.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:ddf33d97d2f52d89f6e6e7ae66ee35a4d9ca6f36eda89c24591b0c40205a3629"}, + {file = "pyzmq-26.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dacd995031a01d16eec825bf30802fceb2c3791ef24bcce48fa98ce40918c27b"}, + {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89289a5ee32ef6c439086184529ae060c741334b8970a6855ec0b6ad3ff28764"}, + {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5506f06d7dc6ecf1efacb4a013b1f05071bb24b76350832c96449f4a2d95091c"}, + {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ea039387c10202ce304af74def5021e9adc6297067f3441d348d2b633e8166a"}, + {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a2224fa4a4c2ee872886ed00a571f5e967c85e078e8e8c2530a2fb01b3309b88"}, + {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:28ad5233e9c3b52d76196c696e362508959741e1a005fb8fa03b51aea156088f"}, + {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:1c17211bc037c7d88e85ed8b7d8f7e52db6dc8eca5590d162717c654550f7282"}, + {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b8f86dd868d41bea9a5f873ee13bf5551c94cf6bc51baebc6f85075971fe6eea"}, + {file = "pyzmq-26.2.0-cp310-cp310-win32.whl", hash = "sha256:46a446c212e58456b23af260f3d9fb785054f3e3653dbf7279d8f2b5546b21c2"}, + {file = "pyzmq-26.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:49d34ab71db5a9c292a7644ce74190b1dd5a3475612eefb1f8be1d6961441971"}, + {file = "pyzmq-26.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:bfa832bfa540e5b5c27dcf5de5d82ebc431b82c453a43d141afb1e5d2de025fa"}, + {file = "pyzmq-26.2.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:8f7e66c7113c684c2b3f1c83cdd3376103ee0ce4c49ff80a648643e57fb22218"}, + {file = "pyzmq-26.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3a495b30fc91db2db25120df5847d9833af237546fd59170701acd816ccc01c4"}, + {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77eb0968da535cba0470a5165468b2cac7772cfb569977cff92e240f57e31bef"}, + {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ace4f71f1900a548f48407fc9be59c6ba9d9aaf658c2eea6cf2779e72f9f317"}, + {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92a78853d7280bffb93df0a4a6a2498cba10ee793cc8076ef797ef2f74d107cf"}, + {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:689c5d781014956a4a6de61d74ba97b23547e431e9e7d64f27d4922ba96e9d6e"}, + {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0aca98bc423eb7d153214b2df397c6421ba6373d3397b26c057af3c904452e37"}, + {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f3496d76b89d9429a656293744ceca4d2ac2a10ae59b84c1da9b5165f429ad3"}, + {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5c2b3bfd4b9689919db068ac6c9911f3fcb231c39f7dd30e3138be94896d18e6"}, + {file = "pyzmq-26.2.0-cp311-cp311-win32.whl", hash = "sha256:eac5174677da084abf378739dbf4ad245661635f1600edd1221f150b165343f4"}, + {file = "pyzmq-26.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:5a509df7d0a83a4b178d0f937ef14286659225ef4e8812e05580776c70e155d5"}, + {file = "pyzmq-26.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:c0e6091b157d48cbe37bd67233318dbb53e1e6327d6fc3bb284afd585d141003"}, + 
{file = "pyzmq-26.2.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:ded0fc7d90fe93ae0b18059930086c51e640cdd3baebdc783a695c77f123dcd9"}, + {file = "pyzmq-26.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:17bf5a931c7f6618023cdacc7081f3f266aecb68ca692adac015c383a134ca52"}, + {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55cf66647e49d4621a7e20c8d13511ef1fe1efbbccf670811864452487007e08"}, + {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4661c88db4a9e0f958c8abc2b97472e23061f0bc737f6f6179d7a27024e1faa5"}, + {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea7f69de383cb47522c9c208aec6dd17697db7875a4674c4af3f8cfdac0bdeae"}, + {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:7f98f6dfa8b8ccaf39163ce872bddacca38f6a67289116c8937a02e30bbe9711"}, + {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e3e0210287329272539eea617830a6a28161fbbd8a3271bf4150ae3e58c5d0e6"}, + {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6b274e0762c33c7471f1a7471d1a2085b1a35eba5cdc48d2ae319f28b6fc4de3"}, + {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:29c6a4635eef69d68a00321e12a7d2559fe2dfccfa8efae3ffb8e91cd0b36a8b"}, + {file = "pyzmq-26.2.0-cp312-cp312-win32.whl", hash = "sha256:989d842dc06dc59feea09e58c74ca3e1678c812a4a8a2a419046d711031f69c7"}, + {file = "pyzmq-26.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:2a50625acdc7801bc6f74698c5c583a491c61d73c6b7ea4dee3901bb99adb27a"}, + {file = "pyzmq-26.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:4d29ab8592b6ad12ebbf92ac2ed2bedcfd1cec192d8e559e2e099f648570e19b"}, + {file = "pyzmq-26.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9dd8cd1aeb00775f527ec60022004d030ddc51d783d056e3e23e74e623e33726"}, + {file = "pyzmq-26.2.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:28c812d9757fe8acecc910c9ac9dafd2ce968c00f9e619db09e9f8f54c3a68a3"}, + {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d80b1dd99c1942f74ed608ddb38b181b87476c6a966a88a950c7dee118fdf50"}, + {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c997098cc65e3208eca09303630e84d42718620e83b733d0fd69543a9cab9cb"}, + {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ad1bc8d1b7a18497dda9600b12dc193c577beb391beae5cd2349184db40f187"}, + {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:bea2acdd8ea4275e1278350ced63da0b166421928276c7c8e3f9729d7402a57b"}, + {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:23f4aad749d13698f3f7b64aad34f5fc02d6f20f05999eebc96b89b01262fb18"}, + {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a4f96f0d88accc3dbe4a9025f785ba830f968e21e3e2c6321ccdfc9aef755115"}, + {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ced65e5a985398827cc9276b93ef6dfabe0273c23de8c7931339d7e141c2818e"}, + {file = "pyzmq-26.2.0-cp313-cp313-win32.whl", hash = "sha256:31507f7b47cc1ead1f6e86927f8ebb196a0bab043f6345ce070f412a59bf87b5"}, + {file = "pyzmq-26.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:70fc7fcf0410d16ebdda9b26cbd8bf8d803d220a7f3522e060a69a9c87bf7bad"}, + {file = "pyzmq-26.2.0-cp313-cp313-win_arm64.whl", hash = 
"sha256:c3789bd5768ab5618ebf09cef6ec2b35fed88709b104351748a63045f0ff9797"}, + {file = "pyzmq-26.2.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:034da5fc55d9f8da09015d368f519478a52675e558c989bfcb5cf6d4e16a7d2a"}, + {file = "pyzmq-26.2.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:c92d73464b886931308ccc45b2744e5968cbaade0b1d6aeb40d8ab537765f5bc"}, + {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:794a4562dcb374f7dbbfb3f51d28fb40123b5a2abadee7b4091f93054909add5"}, + {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aee22939bb6075e7afededabad1a56a905da0b3c4e3e0c45e75810ebe3a52672"}, + {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ae90ff9dad33a1cfe947d2c40cb9cb5e600d759ac4f0fd22616ce6540f72797"}, + {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:43a47408ac52647dfabbc66a25b05b6a61700b5165807e3fbd40063fcaf46386"}, + {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:25bf2374a2a8433633c65ccb9553350d5e17e60c8eb4de4d92cc6bd60f01d306"}, + {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_i686.whl", hash = "sha256:007137c9ac9ad5ea21e6ad97d3489af654381324d5d3ba614c323f60dab8fae6"}, + {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:470d4a4f6d48fb34e92d768b4e8a5cc3780db0d69107abf1cd7ff734b9766eb0"}, + {file = "pyzmq-26.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3b55a4229ce5da9497dd0452b914556ae58e96a4381bb6f59f1305dfd7e53fc8"}, + {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9cb3a6460cdea8fe8194a76de8895707e61ded10ad0be97188cc8463ffa7e3a8"}, + {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8ab5cad923cc95c87bffee098a27856c859bd5d0af31bd346035aa816b081fe1"}, + {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ed69074a610fad1c2fda66180e7b2edd4d31c53f2d1872bc2d1211563904cd9"}, + {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:cccba051221b916a4f5e538997c45d7d136a5646442b1231b916d0164067ea27"}, + {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:0eaa83fc4c1e271c24eaf8fb083cbccef8fde77ec8cd45f3c35a9a123e6da097"}, + {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9edda2df81daa129b25a39b86cb57dfdfe16f7ec15b42b19bfac503360d27a93"}, + {file = "pyzmq-26.2.0-cp37-cp37m-win32.whl", hash = "sha256:ea0eb6af8a17fa272f7b98d7bebfab7836a0d62738e16ba380f440fceca2d951"}, + {file = "pyzmq-26.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:4ff9dc6bc1664bb9eec25cd17506ef6672d506115095411e237d571e92a58231"}, + {file = "pyzmq-26.2.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:2eb7735ee73ca1b0d71e0e67c3739c689067f055c764f73aac4cc8ecf958ee3f"}, + {file = "pyzmq-26.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a534f43bc738181aa7cbbaf48e3eca62c76453a40a746ab95d4b27b1111a7d2"}, + {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:aedd5dd8692635813368e558a05266b995d3d020b23e49581ddd5bbe197a8ab6"}, + {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8be4700cd8bb02cc454f630dcdf7cfa99de96788b80c51b60fe2fe1dac480289"}, + {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1fcc03fa4997c447dce58264e93b5aa2d57714fbe0f06c07b7785ae131512732"}, + {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:402b190912935d3db15b03e8f7485812db350d271b284ded2b80d2e5704be780"}, + {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8685fa9c25ff00f550c1fec650430c4b71e4e48e8d852f7ddcf2e48308038640"}, + {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:76589c020680778f06b7e0b193f4b6dd66d470234a16e1df90329f5e14a171cd"}, + {file = "pyzmq-26.2.0-cp38-cp38-win32.whl", hash = "sha256:8423c1877d72c041f2c263b1ec6e34360448decfb323fa8b94e85883043ef988"}, + {file = "pyzmq-26.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:76589f2cd6b77b5bdea4fca5992dc1c23389d68b18ccc26a53680ba2dc80ff2f"}, + {file = "pyzmq-26.2.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:b1d464cb8d72bfc1a3adc53305a63a8e0cac6bc8c5a07e8ca190ab8d3faa43c2"}, + {file = "pyzmq-26.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4da04c48873a6abdd71811c5e163bd656ee1b957971db7f35140a2d573f6949c"}, + {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d049df610ac811dcffdc147153b414147428567fbbc8be43bb8885f04db39d98"}, + {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:05590cdbc6b902101d0e65d6a4780af14dc22914cc6ab995d99b85af45362cc9"}, + {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c811cfcd6a9bf680236c40c6f617187515269ab2912f3d7e8c0174898e2519db"}, + {file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6835dd60355593de10350394242b5757fbbd88b25287314316f266e24c61d073"}, + {file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc6bee759a6bddea5db78d7dcd609397449cb2d2d6587f48f3ca613b19410cfc"}, + {file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c530e1eecd036ecc83c3407f77bb86feb79916d4a33d11394b8234f3bd35b940"}, + {file = "pyzmq-26.2.0-cp39-cp39-win32.whl", hash = "sha256:367b4f689786fca726ef7a6c5ba606958b145b9340a5e4808132cc65759abd44"}, + {file = "pyzmq-26.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:e6fa2e3e683f34aea77de8112f6483803c96a44fd726d7358b9888ae5bb394ec"}, + {file = "pyzmq-26.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:7445be39143a8aa4faec43b076e06944b8f9d0701b669df4af200531b21e40bb"}, + {file = "pyzmq-26.2.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:706e794564bec25819d21a41c31d4df2d48e1cc4b061e8d345d7fb4dd3e94072"}, + {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b435f2753621cd36e7c1762156815e21c985c72b19135dac43a7f4f31d28dd1"}, + {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:160c7e0a5eb178011e72892f99f918c04a131f36056d10d9c1afb223fc952c2d"}, + {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c4a71d5d6e7b28a47a394c0471b7e77a0661e2d651e7ae91e0cab0a587859ca"}, + {file = "pyzmq-26.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:90412f2db8c02a3864cbfc67db0e3dcdbda336acf1c469526d3e869394fe001c"}, + {file = "pyzmq-26.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2ea4ad4e6a12e454de05f2949d4beddb52460f3de7c8b9d5c46fbb7d7222e02c"}, + {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fc4f7a173a5609631bb0c42c23d12c49df3966f89f496a51d3eb0ec81f4519d6"}, + {file = 
"pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:878206a45202247781472a2d99df12a176fef806ca175799e1c6ad263510d57c"}, + {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17c412bad2eb9468e876f556eb4ee910e62d721d2c7a53c7fa31e643d35352e6"}, + {file = "pyzmq-26.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:0d987a3ae5a71c6226b203cfd298720e0086c7fe7c74f35fa8edddfbd6597eed"}, + {file = "pyzmq-26.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:39887ac397ff35b7b775db7201095fc6310a35fdbae85bac4523f7eb3b840e20"}, + {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fdb5b3e311d4d4b0eb8b3e8b4d1b0a512713ad7e6a68791d0923d1aec433d919"}, + {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:226af7dcb51fdb0109f0016449b357e182ea0ceb6b47dfb5999d569e5db161d5"}, + {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bed0e799e6120b9c32756203fb9dfe8ca2fb8467fed830c34c877e25638c3fc"}, + {file = "pyzmq-26.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:29c7947c594e105cb9e6c466bace8532dc1ca02d498684128b339799f5248277"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:cdeabcff45d1c219636ee2e54d852262e5c2e085d6cb476d938aee8d921356b3"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35cffef589bcdc587d06f9149f8d5e9e8859920a071df5a2671de2213bef592a"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18c8dc3b7468d8b4bdf60ce9d7141897da103c7a4690157b32b60acb45e333e6"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7133d0a1677aec369d67dd78520d3fa96dd7f3dcec99d66c1762870e5ea1a50a"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6a96179a24b14fa6428cbfc08641c779a53f8fcec43644030328f44034c7f1f4"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4f78c88905461a9203eac9faac157a2a0dbba84a0fd09fd29315db27be40af9f"}, + {file = "pyzmq-26.2.0.tar.gz", hash = "sha256:070672c258581c8e4f640b5159297580a9974b026043bd4ab0470be9ed324f1f"}, +] + +[package.dependencies] +cffi = {version = "*", markers = "implementation_name == \"pypy\""} + +[[package]] +name = "regex" +version = "2024.11.6" +description = "Alternative regular expression module, to replace re." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:164d8b7b3b4bcb2068b97428060b2a53be050085ef94eca7f240e7947f1b080e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3660c82f209655a06b587d55e723f0b813d3a7db2e32e5e7dc64ac2a9e86fde"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d22326fcdef5e08c154280b71163ced384b428343ae16a5ab2b3354aed12436e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1ac758ef6aebfc8943560194e9fd0fa18bcb34d89fd8bd2af18183afd8da3a2"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:997d6a487ff00807ba810e0f8332c18b4eb8d29463cfb7c820dc4b6e7562d0cf"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02a02d2bb04fec86ad61f3ea7f49c015a0681bf76abb9857f945d26159d2968c"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f02f93b92358ee3f78660e43b4b0091229260c5d5c408d17d60bf26b6c900e86"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:06eb1be98df10e81ebaded73fcd51989dcf534e3c753466e4b60c4697a003b67"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:040df6fe1a5504eb0f04f048e6d09cd7c7110fef851d7c567a6b6e09942feb7d"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabbfc59f2c6edba2a6622c647b716e34e8e3867e0ab975412c5c2f79b82da2"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8447d2d39b5abe381419319f942de20b7ecd60ce86f16a23b0698f22e1b70008"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:da8f5fc57d1933de22a9e23eec290a0d8a5927a5370d24bda9a6abe50683fe62"}, + {file = "regex-2024.11.6-cp310-cp310-win32.whl", hash = "sha256:b489578720afb782f6ccf2840920f3a32e31ba28a4b162e13900c3e6bd3f930e"}, + {file = "regex-2024.11.6-cp310-cp310-win_amd64.whl", hash = "sha256:5071b2093e793357c9d8b2929dfc13ac5f0a6c650559503bb81189d0a3814519"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5478c6962ad548b54a591778e93cd7c456a7a29f8eca9c49e4f9a806dcc5d638"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c89a8cc122b25ce6945f0423dc1352cb9593c68abd19223eebbd4e56612c5b7"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:94d87b689cdd831934fa3ce16cc15cd65748e6d689f5d2b8f4f4df2065c9fa20"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1062b39a0a2b75a9c694f7a08e7183a80c63c0d62b301418ffd9c35f55aaa114"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:167ed4852351d8a750da48712c3930b031f6efdaa0f22fa1933716bfcd6bf4a3"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:2d548dafee61f06ebdb584080621f3e0c23fff312f0de1afc776e2a2ba99a74f"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a19f302cd1ce5dd01a9099aaa19cae6173306d1302a43b627f62e21cf18ac0"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bec9931dfb61ddd8ef2ebc05646293812cb6b16b60cf7c9511a832b6f1854b55"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9714398225f299aa85267fd222f7142fcb5c769e73d7733344efc46f2ef5cf89"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:202eb32e89f60fc147a41e55cb086db2a3f8cb82f9a9a88440dcfc5d37faae8d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:4181b814e56078e9b00427ca358ec44333765f5ca1b45597ec7446d3a1ef6e34"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:068376da5a7e4da51968ce4c122a7cd31afaaec4fccc7856c92f63876e57b51d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f2c4184420d881a3475fb2c6f4d95d53a8d50209a2500723d831036f7c45"}, + {file = "regex-2024.11.6-cp311-cp311-win32.whl", hash = "sha256:c36f9b6f5f8649bb251a5f3f66564438977b7ef8386a52460ae77e6070d309d9"}, + {file = "regex-2024.11.6-cp311-cp311-win_amd64.whl", hash = "sha256:02e28184be537f0e75c1f9b2f8847dc51e08e6e171c6bde130b2687e0c33cf60"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad"}, + {file = 
"regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54"}, + {file = "regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d"}, + {file = "regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff"}, + {file = "regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3a51ccc315653ba012774efca4f23d1d2a8a8f278a6072e29c7147eee7da446b"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ad182d02e40de7459b73155deb8996bbd8e96852267879396fb274e8700190e3"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba9b72e5643641b7d41fa1f6d5abda2c9a263ae835b917348fc3c928182ad467"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40291b1b89ca6ad8d3f2b82782cc33807f1406cf68c8d440861da6304d8ffbbd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cdf58d0e516ee426a48f7b2c03a332a4114420716d55769ff7108c37a09951bf"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:a36fdf2af13c2b14738f6e973aba563623cb77d753bbbd8d414d18bfaa3105dd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1cee317bfc014c2419a76bcc87f071405e3966da434e03e13beb45f8aced1a6"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50153825ee016b91549962f970d6a4442fa106832e14c918acd1c8e479916c4f"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea1bfda2f7162605f6e8178223576856b3d791109f15ea99a9f95c16a7636fb5"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:df951c5f4a1b1910f1a99ff42c473ff60f8225baa1cdd3539fe2819d9543e9df"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:072623554418a9911446278f16ecb398fb3b540147a7828c06e2011fa531e773"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f654882311409afb1d780b940234208a252322c24a93b442ca714d119e68086c"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:89d75e7293d2b3e674db7d4d9b1bee7f8f3d1609428e293771d1a962617150cc"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f65557897fc977a44ab205ea871b690adaef6b9da6afda4790a2484b04293a5f"}, + {file = "regex-2024.11.6-cp38-cp38-win32.whl", hash = "sha256:6f44ec28b1f858c98d3036ad5d7d0bfc568bdd7a74f9c24e25f41ef1ebfd81a4"}, + {file = "regex-2024.11.6-cp38-cp38-win_amd64.whl", hash = "sha256:bb8f74f2f10dbf13a0be8de623ba4f9491faf58c24064f32b65679b021ed0001"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5704e174f8ccab2026bd2f1ab6c510345ae8eac818b613d7d73e785f1310f839"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:220902c3c5cc6af55d4fe19ead504de80eb91f786dc102fbd74894b1551f095e"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7e351589da0850c125f1600a4c4ba3c722efefe16b297de54300f08d734fbf"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5056b185ca113c88e18223183aa1a50e66507769c9640a6ff75859619d73957b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e34b51b650b23ed3354b5a07aab37034d9f923db2a40519139af34f485f77d0"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5670bce7b200273eee1840ef307bfa07cda90b38ae56e9a6ebcc9f50da9c469b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08986dce1339bc932923e7d1232ce9881499a0e02925f7402fb7c982515419ef"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93c0b12d3d3bc25af4ebbf38f9ee780a487e8bf6954c115b9f015822d3bb8e48"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:764e71f22ab3b305e7f4c21f1a97e1526a25ebdd22513e251cf376760213da13"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f056bf21105c2515c32372bbc057f43eb02aae2fda61052e2f7622c801f0b4e2"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:69ab78f848845569401469da20df3e081e6b5a11cb086de3eed1d48f5ed57c95"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = 
"sha256:86fddba590aad9208e2fa8b43b4c098bb0ec74f15718bb6a704e3c63e2cef3e9"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:684d7a212682996d21ca12ef3c17353c021fe9de6049e19ac8481ec35574a70f"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a03e02f48cd1abbd9f3b7e3586d97c8f7a9721c436f51a5245b3b9483044480b"}, + {file = "regex-2024.11.6-cp39-cp39-win32.whl", hash = "sha256:41758407fc32d5c3c5de163888068cfee69cb4c2be844e7ac517a52770f9af57"}, + {file = "regex-2024.11.6-cp39-cp39-win_amd64.whl", hash = "sha256:b2837718570f95dd41675328e111345f9b7095d821bac435aac173ac80b19983"}, + {file = "regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519"}, +] + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "requests-toolbelt" +version = "1.0.0" +description = "A utility belt for advanced users of python-requests" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"}, + {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"}, +] + +[package.dependencies] +requests = ">=2.0.1,<3.0.0" + +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "s3transfer" +version = "0.10.3" +description = "An Amazon S3 Transfer Manager" +optional = false +python-versions = ">=3.8" +files = [ + {file = "s3transfer-0.10.3-py3-none-any.whl", hash = "sha256:263ed587a5803c6c708d3ce44dc4dfedaab4c1a32e8329bab818933d79ddcf5d"}, + {file = "s3transfer-0.10.3.tar.gz", hash = "sha256:4f50ed74ab84d474ce614475e0b8d5047ff080810aac5d01ea25231cfc944b0c"}, +] + +[package.dependencies] +botocore = ">=1.33.2,<2.0a.0" + +[package.extras] +crt = ["botocore[crt] (>=1.33.2,<2.0a.0)"] + +[[package]] +name = "safetensors" +version = "0.4.5" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "safetensors-0.4.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a63eaccd22243c67e4f2b1c3e258b257effc4acd78f3b9d397edc8cf8f1298a7"}, + {file = "safetensors-0.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:23fc9b4ec7b602915cbb4ec1a7c1ad96d2743c322f20ab709e2c35d1b66dad27"}, + {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6885016f34bef80ea1085b7e99b3c1f92cb1be78a49839203060f67b40aee761"}, + {file = 
"safetensors-0.4.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:133620f443450429322f238fda74d512c4008621227fccf2f8cf4a76206fea7c"}, + {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4fb3e0609ec12d2a77e882f07cced530b8262027f64b75d399f1504ffec0ba56"}, + {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0f1dd769f064adc33831f5e97ad07babbd728427f98e3e1db6902e369122737"}, + {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6d156bdb26732feada84f9388a9f135528c1ef5b05fae153da365ad4319c4c5"}, + {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e347d77e2c77eb7624400ccd09bed69d35c0332f417ce8c048d404a096c593b"}, + {file = "safetensors-0.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9f556eea3aec1d3d955403159fe2123ddd68e880f83954ee9b4a3f2e15e716b6"}, + {file = "safetensors-0.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9483f42be3b6bc8ff77dd67302de8ae411c4db39f7224dec66b0eb95822e4163"}, + {file = "safetensors-0.4.5-cp310-none-win32.whl", hash = "sha256:7389129c03fadd1ccc37fd1ebbc773f2b031483b04700923c3511d2a939252cc"}, + {file = "safetensors-0.4.5-cp310-none-win_amd64.whl", hash = "sha256:e98ef5524f8b6620c8cdef97220c0b6a5c1cef69852fcd2f174bb96c2bb316b1"}, + {file = "safetensors-0.4.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:21f848d7aebd5954f92538552d6d75f7c1b4500f51664078b5b49720d180e47c"}, + {file = "safetensors-0.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bb07000b19d41e35eecef9a454f31a8b4718a185293f0d0b1c4b61d6e4487971"}, + {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09dedf7c2fda934ee68143202acff6e9e8eb0ddeeb4cfc24182bef999efa9f42"}, + {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:59b77e4b7a708988d84f26de3ebead61ef1659c73dcbc9946c18f3b1786d2688"}, + {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d3bc83e14d67adc2e9387e511097f254bd1b43c3020440e708858c684cbac68"}, + {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39371fc551c1072976073ab258c3119395294cf49cdc1f8476794627de3130df"}, + {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6c19feda32b931cae0acd42748a670bdf56bee6476a046af20181ad3fee4090"}, + {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a659467495de201e2f282063808a41170448c78bada1e62707b07a27b05e6943"}, + {file = "safetensors-0.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bad5e4b2476949bcd638a89f71b6916fa9a5cae5c1ae7eede337aca2100435c0"}, + {file = "safetensors-0.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a3a315a6d0054bc6889a17f5668a73f94f7fe55121ff59e0a199e3519c08565f"}, + {file = "safetensors-0.4.5-cp311-none-win32.whl", hash = "sha256:a01e232e6d3d5cf8b1667bc3b657a77bdab73f0743c26c1d3c5dd7ce86bd3a92"}, + {file = "safetensors-0.4.5-cp311-none-win_amd64.whl", hash = "sha256:cbd39cae1ad3e3ef6f63a6f07296b080c951f24cec60188378e43d3713000c04"}, + {file = "safetensors-0.4.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:473300314e026bd1043cef391bb16a8689453363381561b8a3e443870937cc1e"}, + {file = "safetensors-0.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:801183a0f76dc647f51a2d9141ad341f9665602a7899a693207a82fb102cc53e"}, + {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1524b54246e422ad6fb6aea1ac71edeeb77666efa67230e1faf6999df9b2e27f"}, + {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b3139098e3e8b2ad7afbca96d30ad29157b50c90861084e69fcb80dec7430461"}, + {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65573dc35be9059770808e276b017256fa30058802c29e1038eb1c00028502ea"}, + {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd33da8e9407559f8779c82a0448e2133737f922d71f884da27184549416bfed"}, + {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3685ce7ed036f916316b567152482b7e959dc754fcc4a8342333d222e05f407c"}, + {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dde2bf390d25f67908278d6f5d59e46211ef98e44108727084d4637ee70ab4f1"}, + {file = "safetensors-0.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7469d70d3de970b1698d47c11ebbf296a308702cbaae7fcb993944751cf985f4"}, + {file = "safetensors-0.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a6ba28118636a130ccbb968bc33d4684c48678695dba2590169d5ab03a45646"}, + {file = "safetensors-0.4.5-cp312-none-win32.whl", hash = "sha256:c859c7ed90b0047f58ee27751c8e56951452ed36a67afee1b0a87847d065eec6"}, + {file = "safetensors-0.4.5-cp312-none-win_amd64.whl", hash = "sha256:b5a8810ad6a6f933fff6c276eae92c1da217b39b4d8b1bc1c0b8af2d270dc532"}, + {file = "safetensors-0.4.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:25e5f8e2e92a74f05b4ca55686234c32aac19927903792b30ee6d7bd5653d54e"}, + {file = "safetensors-0.4.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:81efb124b58af39fcd684254c645e35692fea81c51627259cdf6d67ff4458916"}, + {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:585f1703a518b437f5103aa9cf70e9bd437cb78eea9c51024329e4fb8a3e3679"}, + {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4b99fbf72e3faf0b2f5f16e5e3458b93b7d0a83984fe8d5364c60aa169f2da89"}, + {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b17b299ca9966ca983ecda1c0791a3f07f9ca6ab5ded8ef3d283fff45f6bcd5f"}, + {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:76ded72f69209c9780fdb23ea89e56d35c54ae6abcdec67ccb22af8e696e449a"}, + {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2783956926303dcfeb1de91a4d1204cd4089ab441e622e7caee0642281109db3"}, + {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d94581aab8c6b204def4d7320f07534d6ee34cd4855688004a4354e63b639a35"}, + {file = "safetensors-0.4.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:67e1e7cb8678bb1b37ac48ec0df04faf689e2f4e9e81e566b5c63d9f23748523"}, + {file = "safetensors-0.4.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:dbd280b07e6054ea68b0cb4b16ad9703e7d63cd6890f577cb98acc5354780142"}, + {file = "safetensors-0.4.5-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:77d9b228da8374c7262046a36c1f656ba32a93df6cc51cd4453af932011e77f1"}, + {file = "safetensors-0.4.5-cp37-cp37m-macosx_11_0_arm64.whl", hash = 
"sha256:500cac01d50b301ab7bb192353317035011c5ceeef0fca652f9f43c000bb7f8d"}, + {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75331c0c746f03158ded32465b7d0b0e24c5a22121743662a2393439c43a45cf"}, + {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:670e95fe34e0d591d0529e5e59fd9d3d72bc77b1444fcaa14dccda4f36b5a38b"}, + {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:098923e2574ff237c517d6e840acada8e5b311cb1fa226019105ed82e9c3b62f"}, + {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13ca0902d2648775089fa6a0c8fc9e6390c5f8ee576517d33f9261656f851e3f"}, + {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f0032bedc869c56f8d26259fe39cd21c5199cd57f2228d817a0e23e8370af25"}, + {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f4b15f51b4f8f2a512341d9ce3475cacc19c5fdfc5db1f0e19449e75f95c7dc8"}, + {file = "safetensors-0.4.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f6594d130d0ad933d885c6a7b75c5183cb0e8450f799b80a39eae2b8508955eb"}, + {file = "safetensors-0.4.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:60c828a27e852ded2c85fc0f87bf1ec20e464c5cd4d56ff0e0711855cc2e17f8"}, + {file = "safetensors-0.4.5-cp37-none-win32.whl", hash = "sha256:6d3de65718b86c3eeaa8b73a9c3d123f9307a96bbd7be9698e21e76a56443af5"}, + {file = "safetensors-0.4.5-cp37-none-win_amd64.whl", hash = "sha256:5a2d68a523a4cefd791156a4174189a4114cf0bf9c50ceb89f261600f3b2b81a"}, + {file = "safetensors-0.4.5-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:e7a97058f96340850da0601a3309f3d29d6191b0702b2da201e54c6e3e44ccf0"}, + {file = "safetensors-0.4.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:63bfd425e25f5c733f572e2246e08a1c38bd6f2e027d3f7c87e2e43f228d1345"}, + {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3664ac565d0e809b0b929dae7ccd74e4d3273cd0c6d1220c6430035befb678e"}, + {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:313514b0b9b73ff4ddfb4edd71860696dbe3c1c9dc4d5cc13dbd74da283d2cbf"}, + {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31fa33ee326f750a2f2134a6174773c281d9a266ccd000bd4686d8021f1f3dac"}, + {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:09566792588d77b68abe53754c9f1308fadd35c9f87be939e22c623eaacbed6b"}, + {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309aaec9b66cbf07ad3a2e5cb8a03205663324fea024ba391594423d0f00d9fe"}, + {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:53946c5813b8f9e26103c5efff4a931cc45d874f45229edd68557ffb35ffb9f8"}, + {file = "safetensors-0.4.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:868f9df9e99ad1e7f38c52194063a982bc88fedc7d05096f4f8160403aaf4bd6"}, + {file = "safetensors-0.4.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:9cc9449bd0b0bc538bd5e268221f0c5590bc5c14c1934a6ae359d44410dc68c4"}, + {file = "safetensors-0.4.5-cp38-none-win32.whl", hash = "sha256:83c4f13a9e687335c3928f615cd63a37e3f8ef072a3f2a0599fa09f863fb06a2"}, + {file = "safetensors-0.4.5-cp38-none-win_amd64.whl", hash = 
"sha256:b98d40a2ffa560653f6274e15b27b3544e8e3713a44627ce268f419f35c49478"}, + {file = "safetensors-0.4.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:cf727bb1281d66699bef5683b04d98c894a2803442c490a8d45cd365abfbdeb2"}, + {file = "safetensors-0.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:96f1d038c827cdc552d97e71f522e1049fef0542be575421f7684756a748e457"}, + {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:139fbee92570ecea774e6344fee908907db79646d00b12c535f66bc78bd5ea2c"}, + {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c36302c1c69eebb383775a89645a32b9d266878fab619819ce660309d6176c9b"}, + {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d641f5b8149ea98deb5ffcf604d764aad1de38a8285f86771ce1abf8e74c4891"}, + {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b4db6a61d968de73722b858038c616a1bebd4a86abe2688e46ca0cc2d17558f2"}, + {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b75a616e02f21b6f1d5785b20cecbab5e2bd3f6358a90e8925b813d557666ec1"}, + {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:788ee7d04cc0e0e7f944c52ff05f52a4415b312f5efd2ee66389fb7685ee030c"}, + {file = "safetensors-0.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:87bc42bd04fd9ca31396d3ca0433db0be1411b6b53ac5a32b7845a85d01ffc2e"}, + {file = "safetensors-0.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4037676c86365a721a8c9510323a51861d703b399b78a6b4486a54a65a975fca"}, + {file = "safetensors-0.4.5-cp39-none-win32.whl", hash = "sha256:1500418454529d0ed5c1564bda376c4ddff43f30fce9517d9bee7bcce5a8ef50"}, + {file = "safetensors-0.4.5-cp39-none-win_amd64.whl", hash = "sha256:9d1a94b9d793ed8fe35ab6d5cea28d540a46559bafc6aae98f30ee0867000cab"}, + {file = "safetensors-0.4.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fdadf66b5a22ceb645d5435a0be7a0292ce59648ca1d46b352f13cff3ea80410"}, + {file = "safetensors-0.4.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d42ffd4c2259f31832cb17ff866c111684c87bd930892a1ba53fed28370c918c"}, + {file = "safetensors-0.4.5-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd8a1f6d2063a92cd04145c7fd9e31a1c7d85fbec20113a14b487563fdbc0597"}, + {file = "safetensors-0.4.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:951d2fcf1817f4fb0ef0b48f6696688a4e852a95922a042b3f96aaa67eedc920"}, + {file = "safetensors-0.4.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6ac85d9a8c1af0e3132371d9f2d134695a06a96993c2e2f0bbe25debb9e3f67a"}, + {file = "safetensors-0.4.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e3cec4a29eb7fe8da0b1c7988bc3828183080439dd559f720414450de076fcab"}, + {file = "safetensors-0.4.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:21742b391b859e67b26c0b2ac37f52c9c0944a879a25ad2f9f9f3cd61e7fda8f"}, + {file = "safetensors-0.4.5-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c7db3006a4915151ce1913652e907cdede299b974641a83fbc092102ac41b644"}, + {file = "safetensors-0.4.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f68bf99ea970960a237f416ea394e266e0361895753df06e3e06e6ea7907d98b"}, + {file = "safetensors-0.4.5-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:8158938cf3324172df024da511839d373c40fbfaa83e9abf467174b2910d7b4c"}, + {file = "safetensors-0.4.5-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:540ce6c4bf6b58cb0fd93fa5f143bc0ee341c93bb4f9287ccd92cf898cc1b0dd"}, + {file = "safetensors-0.4.5-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bfeaa1a699c6b9ed514bd15e6a91e74738b71125a9292159e3d6b7f0a53d2cde"}, + {file = "safetensors-0.4.5-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:01c8f00da537af711979e1b42a69a8ec9e1d7112f208e0e9b8a35d2c381085ef"}, + {file = "safetensors-0.4.5-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a0dd565f83b30f2ca79b5d35748d0d99dd4b3454f80e03dfb41f0038e3bdf180"}, + {file = "safetensors-0.4.5-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:023b6e5facda76989f4cba95a861b7e656b87e225f61811065d5c501f78cdb3f"}, + {file = "safetensors-0.4.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9633b663393d5796f0b60249549371e392b75a0b955c07e9c6f8708a87fc841f"}, + {file = "safetensors-0.4.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78dd8adfb48716233c45f676d6e48534d34b4bceb50162c13d1f0bdf6f78590a"}, + {file = "safetensors-0.4.5-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e8deb16c4321d61ae72533b8451ec4a9af8656d1c61ff81aa49f966406e4b68"}, + {file = "safetensors-0.4.5-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:52452fa5999dc50c4decaf0c53aa28371f7f1e0fe5c2dd9129059fbe1e1599c7"}, + {file = "safetensors-0.4.5-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d5f23198821e227cfc52d50fa989813513db381255c6d100927b012f0cfec63d"}, + {file = "safetensors-0.4.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f4beb84b6073b1247a773141a6331117e35d07134b3bb0383003f39971d414bb"}, + {file = "safetensors-0.4.5-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:68814d599d25ed2fdd045ed54d370d1d03cf35e02dce56de44c651f828fb9b7b"}, + {file = "safetensors-0.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0b6453c54c57c1781292c46593f8a37254b8b99004c68d6c3ce229688931a22"}, + {file = "safetensors-0.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:adaa9c6dead67e2dd90d634f89131e43162012479d86e25618e821a03d1eb1dc"}, + {file = "safetensors-0.4.5-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:73e7d408e9012cd17511b382b43547850969c7979efc2bc353f317abaf23c84c"}, + {file = "safetensors-0.4.5-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:775409ce0fcc58b10773fdb4221ed1eb007de10fe7adbdf8f5e8a56096b6f0bc"}, + {file = "safetensors-0.4.5-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:834001bed193e4440c4a3950a31059523ee5090605c907c66808664c932b549c"}, + {file = "safetensors-0.4.5.tar.gz", hash = "sha256:d73de19682deabb02524b3d5d1f8b3aaba94c72f1bbfc7911b9b9d5d391c0310"}, +] + +[package.extras] +all = ["safetensors[jax]", "safetensors[numpy]", "safetensors[paddlepaddle]", "safetensors[pinned-tf]", "safetensors[quality]", "safetensors[testing]", "safetensors[torch]"] +dev = ["safetensors[all]"] +jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "safetensors[numpy]"] +mlx = ["mlx (>=0.0.9)"] +numpy = ["numpy (>=1.21.6)"] +paddlepaddle = ["paddlepaddle (>=2.4.1)", "safetensors[numpy]"] +pinned-tf = ["safetensors[numpy]", "tensorflow (==2.11.0)"] +quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort 
(>=5.5.4)"] +tensorflow = ["safetensors[numpy]", "tensorflow (>=2.11.0)"] +testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools-rust (>=1.5.2)"] +torch = ["safetensors[numpy]", "torch (>=1.10)"] + +[[package]] +name = "scikit-learn" +version = "1.5.2" +description = "A set of python modules for machine learning and data mining" +optional = false +python-versions = ">=3.9" +files = [ + {file = "scikit_learn-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:299406827fb9a4f862626d0fe6c122f5f87f8910b86fe5daa4c32dcd742139b6"}, + {file = "scikit_learn-1.5.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:2d4cad1119c77930b235579ad0dc25e65c917e756fe80cab96aa3b9428bd3fb0"}, + {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c412ccc2ad9bf3755915e3908e677b367ebc8d010acbb3f182814524f2e5540"}, + {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a686885a4b3818d9e62904d91b57fa757fc2bed3e465c8b177be652f4dd37c8"}, + {file = "scikit_learn-1.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:c15b1ca23d7c5f33cc2cb0a0d6aaacf893792271cddff0edbd6a40e8319bc113"}, + {file = "scikit_learn-1.5.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03b6158efa3faaf1feea3faa884c840ebd61b6484167c711548fce208ea09445"}, + {file = "scikit_learn-1.5.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1ff45e26928d3b4eb767a8f14a9a6efbf1cbff7c05d1fb0f95f211a89fd4f5de"}, + {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f763897fe92d0e903aa4847b0aec0e68cadfff77e8a0687cabd946c89d17e675"}, + {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8b0ccd4a902836493e026c03256e8b206656f91fbcc4fde28c57a5b752561f1"}, + {file = "scikit_learn-1.5.2-cp311-cp311-win_amd64.whl", hash = "sha256:6c16d84a0d45e4894832b3c4d0bf73050939e21b99b01b6fd59cbb0cf39163b6"}, + {file = "scikit_learn-1.5.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f932a02c3f4956dfb981391ab24bda1dbd90fe3d628e4b42caef3e041c67707a"}, + {file = "scikit_learn-1.5.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:3b923d119d65b7bd555c73be5423bf06c0105678ce7e1f558cb4b40b0a5502b1"}, + {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f60021ec1574e56632be2a36b946f8143bf4e5e6af4a06d85281adc22938e0dd"}, + {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:394397841449853c2290a32050382edaec3da89e35b3e03d6cc966aebc6a8ae6"}, + {file = "scikit_learn-1.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:57cc1786cfd6bd118220a92ede80270132aa353647684efa385a74244a41e3b1"}, + {file = "scikit_learn-1.5.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9a702e2de732bbb20d3bad29ebd77fc05a6b427dc49964300340e4c9328b3f5"}, + {file = "scikit_learn-1.5.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:b0768ad641981f5d3a198430a1d31c3e044ed2e8a6f22166b4d546a5116d7908"}, + {file = "scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:178ddd0a5cb0044464fc1bfc4cca5b1833bfc7bb022d70b05db8530da4bb3dd3"}, + {file = "scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7284ade780084d94505632241bf78c44ab3b6f1e8ccab3d2af58e0e950f9c12"}, + {file = 
"scikit_learn-1.5.2-cp313-cp313-win_amd64.whl", hash = "sha256:b7b0f9a0b1040830d38c39b91b3a44e1b643f4b36e36567b80b7c6bd2202a27f"}, + {file = "scikit_learn-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:757c7d514ddb00ae249832fe87100d9c73c6ea91423802872d9e74970a0e40b9"}, + {file = "scikit_learn-1.5.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:52788f48b5d8bca5c0736c175fa6bdaab2ef00a8f536cda698db61bd89c551c1"}, + {file = "scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:643964678f4b5fbdc95cbf8aec638acc7aa70f5f79ee2cdad1eec3df4ba6ead8"}, + {file = "scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca64b3089a6d9b9363cd3546f8978229dcbb737aceb2c12144ee3f70f95684b7"}, + {file = "scikit_learn-1.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:3bed4909ba187aca80580fe2ef370d9180dcf18e621a27c4cf2ef10d279a7efe"}, + {file = "scikit_learn-1.5.2.tar.gz", hash = "sha256:b4237ed7b3fdd0a4882792e68ef2545d5baa50aca3bb45aa7df468138ad8f94d"}, +] + +[package.dependencies] +joblib = ">=1.2.0" +numpy = ">=1.19.5" +scipy = ">=1.6.0" +threadpoolctl = ">=3.1.0" + +[package.extras] +benchmark = ["matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "pandas (>=1.1.5)"] +build = ["cython (>=3.0.10)", "meson-python (>=0.16.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.5.0)", "sphinx-design (>=0.6.0)", "sphinx-gallery (>=0.16.0)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)"] +examples = ["matplotlib (>=3.3.4)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)"] +install = ["joblib (>=1.2.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)", "threadpoolctl (>=3.1.0)"] +maintenance = ["conda-lock (==2.5.6)"] +tests = ["black (>=24.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.9)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.2.1)", "scikit-image (>=0.17.2)"] + +[[package]] +name = "scipy" +version = "1.14.1" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.10" +files = [ + {file = "scipy-1.14.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:b28d2ca4add7ac16ae8bb6632a3c86e4b9e4d52d3e34267f6e1b0c1f8d87e389"}, + {file = "scipy-1.14.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d0d2821003174de06b69e58cef2316a6622b60ee613121199cb2852a873f8cf3"}, + {file = "scipy-1.14.1-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8bddf15838ba768bb5f5083c1ea012d64c9a444e16192762bd858f1e126196d0"}, + {file = "scipy-1.14.1-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:97c5dddd5932bd2a1a31c927ba5e1463a53b87ca96b5c9bdf5dfd6096e27efc3"}, + {file = "scipy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ff0a7e01e422c15739ecd64432743cf7aae2b03f3084288f399affcefe5222d"}, + {file = "scipy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e32dced201274bf96899e6491d9ba3e9a5f6b336708656466ad0522d8528f69"}, + 
{file = "scipy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8426251ad1e4ad903a4514712d2fa8fdd5382c978010d1c6f5f37ef286a713ad"}, + {file = "scipy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:a49f6ed96f83966f576b33a44257d869756df6cf1ef4934f59dd58b25e0327e5"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:2da0469a4ef0ecd3693761acbdc20f2fdeafb69e6819cc081308cc978153c675"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:c0ee987efa6737242745f347835da2cc5bb9f1b42996a4d97d5c7ff7928cb6f2"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3a1b111fac6baec1c1d92f27e76511c9e7218f1695d61b59e05e0fe04dc59617"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8475230e55549ab3f207bff11ebfc91c805dc3463ef62eda3ccf593254524ce8"}, + {file = "scipy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:278266012eb69f4a720827bdd2dc54b2271c97d84255b2faaa8f161a158c3b37"}, + {file = "scipy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fef8c87f8abfb884dac04e97824b61299880c43f4ce675dd2cbeadd3c9b466d2"}, + {file = "scipy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b05d43735bb2f07d689f56f7b474788a13ed8adc484a85aa65c0fd931cf9ccd2"}, + {file = "scipy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:716e389b694c4bb564b4fc0c51bc84d381735e0d39d3f26ec1af2556ec6aad94"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:631f07b3734d34aced009aaf6fedfd0eb3498a97e581c3b1e5f14a04164a456d"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:af29a935803cc707ab2ed7791c44288a682f9c8107bc00f0eccc4f92c08d6e07"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:2843f2d527d9eebec9a43e6b406fb7266f3af25a751aa91d62ff416f54170bc5"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:eb58ca0abd96911932f688528977858681a59d61a7ce908ffd355957f7025cfc"}, + {file = "scipy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30ac8812c1d2aab7131a79ba62933a2a76f582d5dbbc695192453dae67ad6310"}, + {file = "scipy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f9ea80f2e65bdaa0b7627fb00cbeb2daf163caa015e59b7516395fe3bd1e066"}, + {file = "scipy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:edaf02b82cd7639db00dbff629995ef185c8df4c3ffa71a5562a595765a06ce1"}, + {file = "scipy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:2ff38e22128e6c03ff73b6bb0f85f897d2362f8c052e3b8ad00532198fbdae3f"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1729560c906963fc8389f6aac023739ff3983e727b1a4d87696b7bf108316a79"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:4079b90df244709e675cdc8b93bfd8a395d59af40b72e339c2287c91860deb8e"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e0cf28db0f24a38b2a0ca33a85a54852586e43cf6fd876365c86e0657cfe7d73"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0c2f95de3b04e26f5f3ad5bb05e74ba7f68b837133a4492414b3afd79dfe540e"}, + {file = "scipy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b99722ea48b7ea25e8e015e8341ae74624f72e5f21fc2abd45f3a93266de4c5d"}, + {file = "scipy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:5149e3fd2d686e42144a093b206aef01932a0059c2a33ddfa67f5f035bdfe13e"}, + {file = "scipy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4f5a7c49323533f9103d4dacf4e4f07078f360743dec7f7596949149efeec06"}, + {file = "scipy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:baff393942b550823bfce952bb62270ee17504d02a1801d7fd0719534dfb9c84"}, + {file = "scipy-1.14.1.tar.gz", hash = "sha256:5a275584e726026a5699459aa72f828a610821006228e841b94275c4a7c08417"}, +] + +[package.dependencies] +numpy = ">=1.23.5,<2.3" + +[package.extras] +dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] +doc = ["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.13.1)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<=7.3.7)", "sphinx-design (>=0.4.0)"] +test = ["Cython", "array-api-strict (>=2.0)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "smmap" +version = "5.0.1" +description = "A pure Python implementation of a sliding window memory map manager" +optional = false +python-versions = ">=3.7" +files = [ + {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"}, + {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "soupsieve" +version = "2.6" +description = "A modern CSS selector implementation for Beautiful Soup." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"}, + {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"}, +] + +[[package]] +name = "sqlalchemy" +version = "2.0.36" +description = "Database Abstraction Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8318f4776c85abc3f40ab185e388bee7a6ea99e7fa3a30686580b209eaa35c08"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c245b1fbade9c35e5bd3b64270ab49ce990369018289ecfde3f9c318411aaa07"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69f93723edbca7342624d09f6704e7126b152eaed3cdbb634cb657a54332a3c5"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9511d8dd4a6e9271d07d150fb2f81874a3c8c95e11ff9af3a2dfc35fe42ee44"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-win32.whl", hash = "sha256:c3f3631693003d8e585d4200730616b78fafd5a01ef8b698f6967da5c605b3fa"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-win_amd64.whl", hash = "sha256:a86bfab2ef46d63300c0f06936bd6e6c0105faa11d509083ba8f2f9d237fb5b5"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = "sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"}, 
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = "sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:be9812b766cad94a25bc63bec11f88c4ad3629a0cec1cd5d4ba48dc23860486b"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aae840ebbd6cdd41af1c14590e5741665e5272d2fee999306673a1bb1fdb4d"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4557e1f11c5f653ebfdd924f3f9d5ebfc718283b0b9beebaa5dd6b77ec290971"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07b441f7d03b9a66299ce7ccf3ef2900abc81c0db434f42a5694a37bd73870f2"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:28120ef39c92c2dd60f2721af9328479516844c6b550b077ca450c7d7dc68575"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-win32.whl", hash = "sha256:b81ee3d84803fd42d0b154cb6892ae57ea6b7c55d8359a02379965706c7efe6c"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-win_amd64.whl", hash = "sha256:f942a799516184c855e1a32fbc7b29d7e571b52612647866d4ec1c3242578fcb"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3d6718667da04294d7df1670d70eeddd414f313738d20a6f1d1f379e3139a545"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:72c28b84b174ce8af8504ca28ae9347d317f9dba3999e5981a3cd441f3712e24"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b11d0cfdd2b095e7b0686cf5fabeb9c67fae5b06d265d8180715b8cfa86522e3"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e32092c47011d113dc01ab3e1d3ce9f006a47223b18422c5c0d150af13a00687"}, + {file = 
"SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6a440293d802d3011028e14e4226da1434b373cbaf4a4bbb63f845761a708346"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c54a1e53a0c308a8e8a7dffb59097bff7facda27c70c286f005327f21b2bd6b1"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-win32.whl", hash = "sha256:1e0d612a17581b6616ff03c8e3d5eff7452f34655c901f75d62bd86449d9750e"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-win_amd64.whl", hash = "sha256:8958b10490125124463095bbdadda5aa22ec799f91958e410438ad6c97a7b793"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dc022184d3e5cacc9579e41805a681187650e170eb2fd70e28b86192a479dcaa"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b817d41d692bf286abc181f8af476c4fbef3fd05e798777492618378448ee689"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4e46a888b54be23d03a89be510f24a7652fe6ff660787b96cd0e57a4ebcb46d"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ae3005ed83f5967f961fd091f2f8c5329161f69ce8480aa8168b2d7fe37f06"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03e08af7a5f9386a43919eda9de33ffda16b44eb11f3b313e6822243770e9763"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3dbb986bad3ed5ceaf090200eba750b5245150bd97d3e67343a3cfed06feecf7"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-win32.whl", hash = "sha256:9fe53b404f24789b5ea9003fc25b9a3988feddebd7e7b369c8fac27ad6f52f28"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-win_amd64.whl", hash = "sha256:af148a33ff0349f53512a049c6406923e4e02bf2f26c5fb285f143faf4f0e46a"}, + {file = "SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e"}, + {file = "sqlalchemy-2.0.36.tar.gz", hash = "sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5"}, +] + +[package.dependencies] +greenlet = {version = "!=0.4.17", markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} +typing-extensions = ">=4.6.0" + +[package.extras] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] +aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] +aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] +asyncio = ["greenlet (!=0.4.17)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx_oracle (>=8)"] +oracle-oracledb = ["oracledb (>=1.0.1)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = ["sqlcipher3_binary"] + +[[package]] +name = "sqlparse" +version = "0.5.2" +description = "A non-validating SQL parser." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "sqlparse-0.5.2-py3-none-any.whl", hash = "sha256:e99bc85c78160918c3e1d9230834ab8d80fc06c59d03f8db2618f65f65dda55e"}, + {file = "sqlparse-0.5.2.tar.gz", hash = "sha256:9e37b35e16d1cc652a2545f0997c1deb23ea28fa1f3eefe609eee3063c3b105f"}, +] + +[package.extras] +dev = ["build", "hatch"] +doc = ["sphinx"] + +[[package]] +name = "stack-data" +version = "0.6.3" +description = "Extract data from python stack frames and tracebacks for informative displays" +optional = false +python-versions = "*" +files = [ + {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, + {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, +] + +[package.dependencies] +asttokens = ">=2.1.0" +executing = ">=1.2.0" +pure-eval = "*" + +[package.extras] +tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] + +[[package]] +name = "sympy" +version = "1.13.3" +description = "Computer algebra system (CAS) in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "sympy-1.13.3-py3-none-any.whl", hash = "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73"}, + {file = "sympy-1.13.3.tar.gz", hash = "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9"}, +] + +[package.dependencies] +mpmath = ">=1.1.0,<1.4" + +[package.extras] +dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"] + +[[package]] +name = "tabulate" +version = "0.9.0" +description = "Pretty-print tabular data" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, + {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, +] + +[package.extras] +widechars = ["wcwidth"] + +[[package]] +name = "tbb" +version = "2021.13.1" +description = "Intel® oneAPI Threading Building Blocks (oneTBB)" +optional = false +python-versions = "*" +files = [ + {file = "tbb-2021.13.1-py2.py3-none-manylinux1_i686.whl", hash = "sha256:bb5bdea0c0e9e6ad0739e7a8796c2635ce9eccca86dd48c426cd8027ac70fb1d"}, + {file = "tbb-2021.13.1-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:d916359dc685579d09e4b344241550afc1cc034f7f5ec7234c258b6680912d70"}, + {file = "tbb-2021.13.1-py3-none-win32.whl", hash = "sha256:00f5e5a70051650ddd0ab6247c0549521968339ec21002e475cd23b1cbf46d66"}, + {file = "tbb-2021.13.1-py3-none-win_amd64.whl", hash = "sha256:cbf024b2463fdab3ebe3fa6ff453026358e6b903839c80d647e08ad6d0796ee9"}, +] + +[[package]] +name = "tenacity" +version = "8.5.0" +description = "Retry code until it succeeds" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"}, + {file = "tenacity-8.5.0.tar.gz", hash = "sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"}, +] + +[package.extras] +doc = ["reno", "sphinx"] +test = ["pytest", "tornado (>=4.5)", "typeguard"] + +[[package]] +name = "threadpoolctl" +version = "3.5.0" +description = "threadpoolctl" +optional = false +python-versions = ">=3.8" +files = [ + {file = "threadpoolctl-3.5.0-py3-none-any.whl", hash = "sha256:56c1e26c150397e58c4926da8eeee87533b1e32bef131bd4bf6a2f45f3185467"}, + {file = "threadpoolctl-3.5.0.tar.gz", hash = 
"sha256:082433502dd922bf738de0d8bcc4fdcbf0979ff44c42bd40f5af8a282f6fa107"}, +] + +[[package]] +name = "tiktoken" +version = "0.7.0" +description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tiktoken-0.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485f3cc6aba7c6b6ce388ba634fbba656d9ee27f766216f45146beb4ac18b25f"}, + {file = "tiktoken-0.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e54be9a2cd2f6d6ffa3517b064983fb695c9a9d8aa7d574d1ef3c3f931a99225"}, + {file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79383a6e2c654c6040e5f8506f3750db9ddd71b550c724e673203b4f6b4b4590"}, + {file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d4511c52caacf3c4981d1ae2df85908bd31853f33d30b345c8b6830763f769c"}, + {file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:13c94efacdd3de9aff824a788353aa5749c0faee1fbe3816df365ea450b82311"}, + {file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8e58c7eb29d2ab35a7a8929cbeea60216a4ccdf42efa8974d8e176d50c9a3df5"}, + {file = "tiktoken-0.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:21a20c3bd1dd3e55b91c1331bf25f4af522c525e771691adbc9a69336fa7f702"}, + {file = "tiktoken-0.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:10c7674f81e6e350fcbed7c09a65bca9356eaab27fb2dac65a1e440f2bcfe30f"}, + {file = "tiktoken-0.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:084cec29713bc9d4189a937f8a35dbdfa785bd1235a34c1124fe2323821ee93f"}, + {file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:811229fde1652fedcca7c6dfe76724d0908775b353556d8a71ed74d866f73f7b"}, + {file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86b6e7dc2e7ad1b3757e8a24597415bafcfb454cebf9a33a01f2e6ba2e663992"}, + {file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1063c5748be36344c7e18c7913c53e2cca116764c2080177e57d62c7ad4576d1"}, + {file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:20295d21419bfcca092644f7e2f2138ff947a6eb8cfc732c09cc7d76988d4a89"}, + {file = "tiktoken-0.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:959d993749b083acc57a317cbc643fb85c014d055b2119b739487288f4e5d1cb"}, + {file = "tiktoken-0.7.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:71c55d066388c55a9c00f61d2c456a6086673ab7dec22dd739c23f77195b1908"}, + {file = "tiktoken-0.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:09ed925bccaa8043e34c519fbb2f99110bd07c6fd67714793c21ac298e449410"}, + {file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03c6c40ff1db0f48a7b4d2dafeae73a5607aacb472fa11f125e7baf9dce73704"}, + {file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d20b5c6af30e621b4aca094ee61777a44118f52d886dbe4f02b70dfe05c15350"}, + {file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d427614c3e074004efa2f2411e16c826f9df427d3c70a54725cae860f09e4bf4"}, + {file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8c46d7af7b8c6987fac9b9f61041b452afe92eb087d29c9ce54951280f899a97"}, + {file = "tiktoken-0.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:0bc603c30b9e371e7c4c7935aba02af5994a909fc3c0fe66e7004070858d3f8f"}, + {file = "tiktoken-0.7.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:2398fecd38c921bcd68418675a6d155fad5f5e14c2e92fcf5fe566fa5485a858"}, + {file = "tiktoken-0.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f5f6afb52fb8a7ea1c811e435e4188f2bef81b5e0f7a8635cc79b0eef0193d6"}, + {file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:861f9ee616766d736be4147abac500732b505bf7013cfaf019b85892637f235e"}, + {file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54031f95c6939f6b78122c0aa03a93273a96365103793a22e1793ee86da31685"}, + {file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fffdcb319b614cf14f04d02a52e26b1d1ae14a570f90e9b55461a72672f7b13d"}, + {file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c72baaeaefa03ff9ba9688624143c858d1f6b755bb85d456d59e529e17234769"}, + {file = "tiktoken-0.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:131b8aeb043a8f112aad9f46011dced25d62629091e51d9dc1adbf4a1cc6aa98"}, + {file = "tiktoken-0.7.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cabc6dc77460df44ec5b879e68692c63551ae4fae7460dd4ff17181df75f1db7"}, + {file = "tiktoken-0.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8d57f29171255f74c0aeacd0651e29aa47dff6f070cb9f35ebc14c82278f3b25"}, + {file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ee92776fdbb3efa02a83f968c19d4997a55c8e9ce7be821ceee04a1d1ee149c"}, + {file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e215292e99cb41fbc96988ef62ea63bb0ce1e15f2c147a61acc319f8b4cbe5bf"}, + {file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8a81bac94769cab437dd3ab0b8a4bc4e0f9cf6835bcaa88de71f39af1791727a"}, + {file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d6d73ea93e91d5ca771256dfc9d1d29f5a554b83821a1dc0891987636e0ae226"}, + {file = "tiktoken-0.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:2bcb28ddf79ffa424f171dfeef9a4daff61a94c631ca6813f43967cb263b83b9"}, + {file = "tiktoken-0.7.0.tar.gz", hash = "sha256:1077266e949c24e0291f6c350433c6f0971365ece2b173a23bc3b9f9defef6b6"}, +] + +[package.dependencies] +regex = ">=2022.1.18" +requests = ">=2.26.0" + +[package.extras] +blobfile = ["blobfile (>=2)"] + +[[package]] +name = "tokenizers" +version = "0.19.1" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tokenizers-0.19.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:952078130b3d101e05ecfc7fc3640282d74ed26bcf691400f872563fca15ac97"}, + {file = "tokenizers-0.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82c8b8063de6c0468f08e82c4e198763e7b97aabfe573fd4cf7b33930ca4df77"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f03727225feaf340ceeb7e00604825addef622d551cbd46b7b775ac834c1e1c4"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:453e4422efdfc9c6b6bf2eae00d5e323f263fff62b29a8c9cd526c5003f3f642"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:02e81bf089ebf0e7f4df34fa0207519f07e66d8491d963618252f2e0729e0b46"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b07c538ba956843833fee1190cf769c60dc62e1cf934ed50d77d5502194d63b1"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:e28cab1582e0eec38b1f38c1c1fb2e56bce5dc180acb1724574fc5f47da2a4fe"}, + {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b01afb7193d47439f091cd8f070a1ced347ad0f9144952a30a41836902fe09e"}, + {file = "tokenizers-0.19.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7fb297edec6c6841ab2e4e8f357209519188e4a59b557ea4fafcf4691d1b4c98"}, + {file = "tokenizers-0.19.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2e8a3dd055e515df7054378dc9d6fa8c8c34e1f32777fb9a01fea81496b3f9d3"}, + {file = "tokenizers-0.19.1-cp310-none-win32.whl", hash = "sha256:7ff898780a155ea053f5d934925f3902be2ed1f4d916461e1a93019cc7250837"}, + {file = "tokenizers-0.19.1-cp310-none-win_amd64.whl", hash = "sha256:bea6f9947e9419c2fda21ae6c32871e3d398cba549b93f4a65a2d369662d9403"}, + {file = "tokenizers-0.19.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5c88d1481f1882c2e53e6bb06491e474e420d9ac7bdff172610c4f9ad3898059"}, + {file = "tokenizers-0.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ddf672ed719b4ed82b51499100f5417d7d9f6fb05a65e232249268f35de5ed14"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dadc509cc8a9fe460bd274c0e16ac4184d0958117cf026e0ea8b32b438171594"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfedf31824ca4915b511b03441784ff640378191918264268e6923da48104acc"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac11016d0a04aa6487b1513a3a36e7bee7eec0e5d30057c9c0408067345c48d2"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76951121890fea8330d3a0df9a954b3f2a37e3ec20e5b0530e9a0044ca2e11fe"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b342d2ce8fc8d00f376af068e3274e2e8649562e3bc6ae4a67784ded6b99428d"}, + {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d16ff18907f4909dca9b076b9c2d899114dd6abceeb074eca0c93e2353f943aa"}, + {file = "tokenizers-0.19.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:706a37cc5332f85f26efbe2bdc9ef8a9b372b77e4645331a405073e4b3a8c1c6"}, + {file = "tokenizers-0.19.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:16baac68651701364b0289979ecec728546133e8e8fe38f66fe48ad07996b88b"}, + {file = "tokenizers-0.19.1-cp311-none-win32.whl", hash = "sha256:9ed240c56b4403e22b9584ee37d87b8bfa14865134e3e1c3fb4b2c42fafd3256"}, + {file = "tokenizers-0.19.1-cp311-none-win_amd64.whl", hash = "sha256:ad57d59341710b94a7d9dbea13f5c1e7d76fd8d9bcd944a7a6ab0b0da6e0cc66"}, + {file = "tokenizers-0.19.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:621d670e1b1c281a1c9698ed89451395d318802ff88d1fc1accff0867a06f153"}, + {file = "tokenizers-0.19.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d924204a3dbe50b75630bd16f821ebda6a5f729928df30f582fb5aade90c818a"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4f3fefdc0446b1a1e6d81cd4c07088ac015665d2e812f6dbba4a06267d1a2c95"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9620b78e0b2d52ef07b0d428323fb34e8ea1219c5eac98c2596311f20f1f9266"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04ce49e82d100594715ac1b2ce87d1a36e61891a91de774755f743babcd0dd52"}, + 
{file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5c2ff13d157afe413bf7e25789879dd463e5a4abfb529a2d8f8473d8042e28f"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3174c76efd9d08f836bfccaca7cfec3f4d1c0a4cf3acbc7236ad577cc423c840"}, + {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c9d5b6c0e7a1e979bec10ff960fae925e947aab95619a6fdb4c1d8ff3708ce3"}, + {file = "tokenizers-0.19.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a179856d1caee06577220ebcfa332af046d576fb73454b8f4d4b0ba8324423ea"}, + {file = "tokenizers-0.19.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:952b80dac1a6492170f8c2429bd11fcaa14377e097d12a1dbe0ef2fb2241e16c"}, + {file = "tokenizers-0.19.1-cp312-none-win32.whl", hash = "sha256:01d62812454c188306755c94755465505836fd616f75067abcae529c35edeb57"}, + {file = "tokenizers-0.19.1-cp312-none-win_amd64.whl", hash = "sha256:b70bfbe3a82d3e3fb2a5e9b22a39f8d1740c96c68b6ace0086b39074f08ab89a"}, + {file = "tokenizers-0.19.1-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:bb9dfe7dae85bc6119d705a76dc068c062b8b575abe3595e3c6276480e67e3f1"}, + {file = "tokenizers-0.19.1-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:1f0360cbea28ea99944ac089c00de7b2e3e1c58f479fb8613b6d8d511ce98267"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:71e3ec71f0e78780851fef28c2a9babe20270404c921b756d7c532d280349214"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b82931fa619dbad979c0ee8e54dd5278acc418209cc897e42fac041f5366d626"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e8ff5b90eabdcdaa19af697885f70fe0b714ce16709cf43d4952f1f85299e73a"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e742d76ad84acbdb1a8e4694f915fe59ff6edc381c97d6dfdd054954e3478ad4"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d8c5d59d7b59885eab559d5bc082b2985555a54cda04dda4c65528d90ad252ad"}, + {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b2da5c32ed869bebd990c9420df49813709e953674c0722ff471a116d97b22d"}, + {file = "tokenizers-0.19.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:638e43936cc8b2cbb9f9d8dde0fe5e7e30766a3318d2342999ae27f68fdc9bd6"}, + {file = "tokenizers-0.19.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:78e769eb3b2c79687d9cb0f89ef77223e8e279b75c0a968e637ca7043a84463f"}, + {file = "tokenizers-0.19.1-cp37-none-win32.whl", hash = "sha256:72791f9bb1ca78e3ae525d4782e85272c63faaef9940d92142aa3eb79f3407a3"}, + {file = "tokenizers-0.19.1-cp37-none-win_amd64.whl", hash = "sha256:f3bbb7a0c5fcb692950b041ae11067ac54826204318922da754f908d95619fbc"}, + {file = "tokenizers-0.19.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:07f9295349bbbcedae8cefdbcfa7f686aa420be8aca5d4f7d1ae6016c128c0c5"}, + {file = "tokenizers-0.19.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:10a707cc6c4b6b183ec5dbfc5c34f3064e18cf62b4a938cb41699e33a99e03c1"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6309271f57b397aa0aff0cbbe632ca9d70430839ca3178bf0f06f825924eca22"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:4ad23d37d68cf00d54af184586d79b84075ada495e7c5c0f601f051b162112dc"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:427c4f0f3df9109314d4f75b8d1f65d9477033e67ffaec4bca53293d3aca286d"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e83a31c9cf181a0a3ef0abad2b5f6b43399faf5da7e696196ddd110d332519ee"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c27b99889bd58b7e301468c0838c5ed75e60c66df0d4db80c08f43462f82e0d3"}, + {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bac0b0eb952412b0b196ca7a40e7dce4ed6f6926489313414010f2e6b9ec2adf"}, + {file = "tokenizers-0.19.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8a6298bde623725ca31c9035a04bf2ef63208d266acd2bed8c2cb7d2b7d53ce6"}, + {file = "tokenizers-0.19.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:08a44864e42fa6d7d76d7be4bec62c9982f6f6248b4aa42f7302aa01e0abfd26"}, + {file = "tokenizers-0.19.1-cp38-none-win32.whl", hash = "sha256:1de5bc8652252d9357a666e609cb1453d4f8e160eb1fb2830ee369dd658e8975"}, + {file = "tokenizers-0.19.1-cp38-none-win_amd64.whl", hash = "sha256:0bcce02bf1ad9882345b34d5bd25ed4949a480cf0e656bbd468f4d8986f7a3f1"}, + {file = "tokenizers-0.19.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:0b9394bd204842a2a1fd37fe29935353742be4a3460b6ccbaefa93f58a8df43d"}, + {file = "tokenizers-0.19.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4692ab92f91b87769d950ca14dbb61f8a9ef36a62f94bad6c82cc84a51f76f6a"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6258c2ef6f06259f70a682491c78561d492e885adeaf9f64f5389f78aa49a051"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c85cf76561fbd01e0d9ea2d1cbe711a65400092bc52b5242b16cfd22e51f0c58"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:670b802d4d82bbbb832ddb0d41df7015b3e549714c0e77f9bed3e74d42400fbe"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:85aa3ab4b03d5e99fdd31660872249df5e855334b6c333e0bc13032ff4469c4a"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cbf001afbbed111a79ca47d75941e9e5361297a87d186cbfc11ed45e30b5daba"}, + {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4c89aa46c269e4e70c4d4f9d6bc644fcc39bb409cb2a81227923404dd6f5227"}, + {file = "tokenizers-0.19.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:39c1ec76ea1027438fafe16ecb0fb84795e62e9d643444c1090179e63808c69d"}, + {file = "tokenizers-0.19.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c2a0d47a89b48d7daa241e004e71fb5a50533718897a4cd6235cb846d511a478"}, + {file = "tokenizers-0.19.1-cp39-none-win32.whl", hash = "sha256:61b7fe8886f2e104d4caf9218b157b106207e0f2a4905c9c7ac98890688aabeb"}, + {file = "tokenizers-0.19.1-cp39-none-win_amd64.whl", hash = "sha256:f97660f6c43efd3e0bfd3f2e3e5615bf215680bad6ee3d469df6454b8c6e8256"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3b11853f17b54c2fe47742c56d8a33bf49ce31caf531e87ac0d7d13d327c9334"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d26194ef6c13302f446d39972aaa36a1dda6450bc8949f5eb4c27f51191375bd"}, + {file = 
"tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e8d1ed93beda54bbd6131a2cb363a576eac746d5c26ba5b7556bc6f964425594"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca407133536f19bdec44b3da117ef0d12e43f6d4b56ac4c765f37eca501c7bda"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce05fde79d2bc2e46ac08aacbc142bead21614d937aac950be88dc79f9db9022"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:35583cd46d16f07c054efd18b5d46af4a2f070a2dd0a47914e66f3ff5efb2b1e"}, + {file = "tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:43350270bfc16b06ad3f6f07eab21f089adb835544417afda0f83256a8bf8b75"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b4399b59d1af5645bcee2072a463318114c39b8547437a7c2d6a186a1b5a0e2d"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6852c5b2a853b8b0ddc5993cd4f33bfffdca4fcc5d52f89dd4b8eada99379285"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bcd266ae85c3d39df2f7e7d0e07f6c41a55e9a3123bb11f854412952deacd828"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecb2651956eea2aa0a2d099434134b1b68f1c31f9a5084d6d53f08ed43d45ff2"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b279ab506ec4445166ac476fb4d3cc383accde1ea152998509a94d82547c8e2a"}, + {file = "tokenizers-0.19.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:89183e55fb86e61d848ff83753f64cded119f5d6e1f553d14ffee3700d0a4a49"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2edbc75744235eea94d595a8b70fe279dd42f3296f76d5a86dde1d46e35f574"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:0e64bfde9a723274e9a71630c3e9494ed7b4c0f76a1faacf7fe294cd26f7ae7c"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0b5ca92bfa717759c052e345770792d02d1f43b06f9e790ca0a1db62838816f3"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f8a20266e695ec9d7a946a019c1d5ca4eddb6613d4f466888eee04f16eedb85"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63c38f45d8f2a2ec0f3a20073cccb335b9f99f73b3c69483cd52ebc75369d8a1"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dd26e3afe8a7b61422df3176e06664503d3f5973b94f45d5c45987e1cb711876"}, + {file = "tokenizers-0.19.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:eddd5783a4a6309ce23432353cdb36220e25cbb779bfa9122320666508b44b88"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:56ae39d4036b753994476a1b935584071093b55c7a72e3b8288e68c313ca26e7"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f9939ca7e58c2758c01b40324a59c034ce0cebad18e0d4563a9b1beab3018243"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6c330c0eb815d212893c67a032e9dc1b38a803eccb32f3e8172c19cc69fbb439"}, + {file = 
"tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec11802450a2487cdf0e634b750a04cbdc1c4d066b97d94ce7dd2cb51ebb325b"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2b718f316b596f36e1dae097a7d5b91fc5b85e90bf08b01ff139bd8953b25af"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ed69af290c2b65169f0ba9034d1dc39a5db9459b32f1dd8b5f3f32a3fcf06eab"}, + {file = "tokenizers-0.19.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f8a9c828277133af13f3859d1b6bf1c3cb6e9e1637df0e45312e6b7c2e622b1f"}, + {file = "tokenizers-0.19.1.tar.gz", hash = "sha256:ee59e6680ed0fdbe6b724cf38bd70400a0c1dd623b07ac729087270caeac88e3"}, +] + +[package.dependencies] +huggingface-hub = ">=0.16.4,<1.0" + +[package.extras] +dev = ["tokenizers[testing]"] +docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] +testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"] + +[[package]] +name = "torch" +version = "2.3.0" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "torch-2.3.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:d8ea5a465dbfd8501f33c937d1f693176c9aef9d1c1b0ca1d44ed7b0a18c52ac"}, + {file = "torch-2.3.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:09c81c5859a5b819956c6925a405ef1cdda393c9d8a01ce3851453f699d3358c"}, + {file = "torch-2.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:1bf023aa20902586f614f7682fedfa463e773e26c58820b74158a72470259459"}, + {file = "torch-2.3.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:758ef938de87a2653bba74b91f703458c15569f1562bf4b6c63c62d9c5a0c1f5"}, + {file = "torch-2.3.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:493d54ee2f9df100b5ce1d18c96dbb8d14908721f76351e908c9d2622773a788"}, + {file = "torch-2.3.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:bce43af735c3da16cc14c7de2be7ad038e2fbf75654c2e274e575c6c05772ace"}, + {file = "torch-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:729804e97b7cf19ae9ab4181f91f5e612af07956f35c8b2c8e9d9f3596a8e877"}, + {file = "torch-2.3.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:d24e328226d8e2af7cf80fcb1d2f1d108e0de32777fab4aaa2b37b9765d8be73"}, + {file = "torch-2.3.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:b0de2bdc0486ea7b14fc47ff805172df44e421a7318b7c4d92ef589a75d27410"}, + {file = "torch-2.3.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:a306c87a3eead1ed47457822c01dfbd459fe2920f2d38cbdf90de18f23f72542"}, + {file = "torch-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:f9b98bf1a3c8af2d4c41f0bf1433920900896c446d1ddc128290ff146d1eb4bd"}, + {file = "torch-2.3.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:dca986214267b34065a79000cee54232e62b41dff1ec2cab9abc3fc8b3dee0ad"}, + {file = "torch-2.3.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:20572f426965dd8a04e92a473d7e445fa579e09943cc0354f3e6fef6130ce061"}, + {file = "torch-2.3.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:e65ba85ae292909cde0dde6369826d51165a3fc8823dc1854cd9432d7f79b932"}, + {file = "torch-2.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:5515503a193781fd1b3f5c474e89c9dfa2faaa782b2795cc4a7ab7e67de923f6"}, + {file = "torch-2.3.0-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:6ae9f64b09516baa4ef890af0672dc981c20b1f0d829ce115d4420a247e88fba"}, + {file = "torch-2.3.0-cp39-cp39-manylinux1_x86_64.whl", hash = 
"sha256:cd0dc498b961ab19cb3f8dbf0c6c50e244f2f37dbfa05754ab44ea057c944ef9"}, + {file = "torch-2.3.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:e05f836559251e4096f3786ee99f4a8cbe67bc7fbedba8ad5e799681e47c5e80"}, + {file = "torch-2.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:4fb27b35dbb32303c2927da86e27b54a92209ddfb7234afb1949ea2b3effffea"}, + {file = "torch-2.3.0-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:760f8bedff506ce9e6e103498f9b1e9e15809e008368594c3a66bf74a8a51380"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +jinja2 = "*" +mkl = {version = ">=2021.1.1,<=2021.4.0", markers = "platform_system == \"Windows\""} +networkx = "*" +nvidia-cublas-cu12 = {version = "12.1.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-cupti-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-nvrtc-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-runtime-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cudnn-cu12 = {version = "8.9.2.26", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nccl-cu12 = {version = "2.20.5", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +sympy = "*" +triton = {version = "2.3.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\""} +typing-extensions = ">=4.8.0" + +[package.extras] +opt-einsum = ["opt-einsum (>=3.3)"] +optree = ["optree (>=0.9.1)"] + +[[package]] +name = "tornado" +version = "6.4.1" +description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"}, + {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"}, + {file = "tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4"}, + {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842"}, + {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3"}, + {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f"}, + {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4"}, + {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698"}, + {file = "tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d"}, + {file = "tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7"}, + {file = "tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"}, +] + +[[package]] +name = "tqdm" +version = "4.67.0" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tqdm-4.67.0-py3-none-any.whl", hash = "sha256:0cd8af9d56911acab92182e88d763100d4788bdf421d251616040cc4d44863be"}, + {file = "tqdm-4.67.0.tar.gz", hash = "sha256:fe5a6f95e6fe0b9755e9469b77b9c3cf850048224ecaa8293d7d2d31f97d869a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +discord = ["requests"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "traitlets" +version = "5.14.3" +description = "Traitlets Python configuration system" +optional = false +python-versions = ">=3.8" +files = [ + {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, + {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, +] + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] + +[[package]] +name = "transformers" +version = "4.41.1" +description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "transformers-4.41.1-py3-none-any.whl", hash = "sha256:f0680e0b1a01067eccd11f62f0522409422c7d6f91d532fe0f50b136a406129d"}, + {file = "transformers-4.41.1.tar.gz", hash = "sha256:fa859e4c66f0896633a3bf534e0d9a29a9a88478a49f94c5d8270537dc61cc42"}, +] + +[package.dependencies] +filelock = "*" 
+huggingface-hub = ">=0.23.0,<1.0" +numpy = ">=1.17" +packaging = ">=20.0" +pyyaml = ">=5.1" +regex = "!=2019.12.17" +requests = "*" +safetensors = ">=0.4.1" +tokenizers = ">=0.19,<0.20" +tqdm = ">=4.27" + +[package.extras] +accelerate = ["accelerate (>=0.21.0)"] +agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch"] +all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision"] +audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +codecarbon = ["codecarbon (==1.2.0)"] +deepspeed = ["accelerate (>=0.21.0)", "deepspeed (>=0.9.3)"] +deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.21.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu 
(>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.19,<0.20)", "urllib3 (<2.0.0)"] +dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)", "scipy (<1.13.0)"] +flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +ftfy = ["ftfy"] +integrations = ["optuna", "ray[tune] (>=2.7.0)", "sigopt"] +ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"] +modelcreation = ["cookiecutter (==1.7.3)"] +natten = ["natten (>=0.14.6,<0.15.0)"] +onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] +onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] +optuna = ["optuna"] +quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "isort (>=5.5.4)", "ruff (==0.1.5)", "urllib3 (<2.0.0)"] +ray = ["ray[tune] (>=2.7.0)"] +retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] +sagemaker = ["sagemaker (>=2.31.0)"] +sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"] +serving = ["fastapi", "pydantic", "starlette", "uvicorn"] +sigopt = ["sigopt"] +sklearn = ["scikit-learn"] +speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "parameterized", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-cpu = ["keras (>2.9,<2.16)", "keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>2.9,<2.16)", "tensorflow-probability (<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +timm = ["timm"] +tokenizers = ["tokenizers (>=0.19,<0.20)"] +torch = ["accelerate (>=0.21.0)", "torch"] +torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +torch-vision = 
["Pillow (>=10.0.1,<=15.0)", "torchvision"] +torchhub = ["filelock", "huggingface-hub (>=0.23.0,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.19,<0.20)", "torch", "tqdm (>=4.27)"] +video = ["av (==9.2.0)", "decord (==0.6.0)"] +vision = ["Pillow (>=10.0.1,<=15.0)"] + +[[package]] +name = "triton" +version = "2.3.0" +description = "A language and compiler for custom Deep Learning operations" +optional = false +python-versions = "*" +files = [ + {file = "triton-2.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ce4b8ff70c48e47274c66f269cce8861cf1dc347ceeb7a67414ca151b1822d8"}, + {file = "triton-2.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c3d9607f85103afdb279938fc1dd2a66e4f5999a58eb48a346bd42738f986dd"}, + {file = "triton-2.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:218d742e67480d9581bafb73ed598416cc8a56f6316152e5562ee65e33de01c0"}, + {file = "triton-2.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:381ec6b3dac06922d3e4099cfc943ef032893b25415de295e82b1a82b0359d2c"}, + {file = "triton-2.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:038e06a09c06a164fef9c48de3af1e13a63dc1ba3c792871e61a8e79720ea440"}, + {file = "triton-2.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d8f636e0341ac348899a47a057c3daea99ea7db31528a225a3ba4ded28ccc65"}, +] + +[package.dependencies] +filelock = "*" + +[package.extras] +build = ["cmake (>=3.20)", "lit"] +tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)", "torch"] +tutorials = ["matplotlib", "pandas", "tabulate", "torch"] + +[[package]] +name = "trove-classifiers" +version = "2024.10.21.16" +description = "Canonical source for classifiers on PyPI (pypi.org)." 
+optional = false +python-versions = "*" +files = [ + {file = "trove_classifiers-2024.10.21.16-py3-none-any.whl", hash = "sha256:0fb11f1e995a757807a8ef1c03829fbd4998d817319abcef1f33165750f103be"}, + {file = "trove_classifiers-2024.10.21.16.tar.gz", hash = "sha256:17cbd055d67d5e9d9de63293a8732943fabc21574e4c7b74edf112b4928cf5f3"}, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "tzdata" +version = "2024.2" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +files = [ + {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, + {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, +] + +[[package]] +name = "unitycatalog-ai" +version = "0.1.0" +description = "Official Python library for Unity Catalog AI support" +optional = false +python-versions = ">=3.9" +files = [] +develop = false + +[package.dependencies] +pydantic = "*" +typing-extensions = "*" + +[package.extras] +databricks = ["databricks-connect (==15.1.0)", "databricks-sdk (>=0.32.0)", "pandas"] +databricks-dev = ["databricks-connect (==15.1.0)", "databricks-sdk (>=0.32.0)", "hatch", "pandas", "pytest", "ruff (==0.6.4)"] +dev = ["databricks-connect", "databricks-sdk (>=0.32.0)", "hatch", "pandas", "pytest", "ruff (==0.6.4)"] + +[package.source] +type = "git" +url = "https://github.com/unitycatalog/unitycatalog.git" +reference = "HEAD" +resolved_reference = "11bff8c95c8888f5bb2a8cf251a60757dfdbc7c6" +subdirectory = "ai/core" + +[[package]] +name = "unitycatalog-openai" +version = "0.1.0" +description = "Support for Unity Catalog functions as OpenAI tools" +optional = false +python-versions = ">=3.9" +files = [] +develop = false + +[package.dependencies] +openai = ">=1.46.1" +pydantic = ">=2,<3" +unitycatalog-ai = "*" + +[package.extras] +dev = ["databricks-connect", "databricks-sdk (>=0.32.0)", "pytest", "ruff (==0.6.4)", "typing-extensions"] + +[package.source] +type = "git" +url = "https://github.com/unitycatalog/unitycatalog.git" +reference = "HEAD" +resolved_reference = "11bff8c95c8888f5bb2a8cf251a60757dfdbc7c6" +subdirectory = "ai/integrations/openai" + +[[package]] +name = "urllib3" +version = "2.2.3" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "waitress" +version = "3.0.2" +description = "Waitress WSGI server" +optional = false +python-versions = ">=3.9.0" +files = [ + {file = "waitress-3.0.2-py3-none-any.whl", hash = "sha256:c56d67fd6e87c2ee598b76abdd4e96cfad1f24cacdea5078d382b1f9d7b5ed2e"}, + {file = "waitress-3.0.2.tar.gz", hash = "sha256:682aaaf2af0c44ada4abfb70ded36393f0e307f4ab9456a215ce0020baefc31f"}, +] + +[package.extras] +docs = ["Sphinx (>=1.8.1)", "docutils", "pylons-sphinx-themes (>=1.0.9)"] +testing = ["coverage (>=7.6.0)", "pytest", "pytest-cov"] + +[[package]] +name = "wcwidth" +version = "0.2.13" +description = "Measures the displayed width of unicode strings in a terminal" +optional = false +python-versions = "*" +files = [ + {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, + {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, +] + +[[package]] +name = "werkzeug" +version = "3.1.3" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.9" +files = [ + {file = "werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e"}, + {file = "werkzeug-3.1.3.tar.gz", hash = "sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + +[[package]] +name = "widgetsnbextension" +version = "4.0.13" +description = "Jupyter interactive widgets for Jupyter Notebook" +optional = false +python-versions = ">=3.7" +files = [ + {file = "widgetsnbextension-4.0.13-py3-none-any.whl", hash = "sha256:74b2692e8500525cc38c2b877236ba51d34541e6385eeed5aec15a70f88a6c71"}, + {file = "widgetsnbextension-4.0.13.tar.gz", hash = "sha256:ffcb67bc9febd10234a362795f643927f4e0c05d9342c727b65d2384f8feacb6"}, +] + +[[package]] +name = "wrapt" +version = "1.16.0" +description = "Module for decorators, wrappers and monkey patching." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, + {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, + {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, + {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, + {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, + {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, + {file = 
"wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, + {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, + {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, + {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, + {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, + {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, + {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, + {file = 
"wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, + {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, + {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, + {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, + {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", 
hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, + {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, + {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, +] + +[[package]] +name = "zipp" +version = "3.21.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.9" +files = [ + {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, + {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.11" +content-hash = "246be99f756b14ca8640a6df64911bbe916781597d9f0bd0a793900c897a25e3" diff --git a/openai_sdk_agent_app_sample_code/pyproject.toml b/openai_sdk_agent_app_sample_code/pyproject.toml new file mode 100644 index 0000000..fa57fb0 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/pyproject.toml @@ -0,0 +1,36 @@ +[tool.poetry] +name = "genai-cookbook" +version = "0.1.0" +description = "" +authors = ["Eric Peter "] +readme = "README.md" +packages = [{include = "cookbook"}] + +[tool.poetry.dependencies] +python = "^3.11" +databricks-connect = "15.1.0" +pydantic = "^2.9.2" +pyyaml = "^6.0.2" +databricks-vectorsearch = "^0.42" +databricks-sdk = {extras = ["openai"], version = "^0.36.0"} +mlflow = "^2.18.0" +databricks-agents = "^0.10.0" +pymupdf4llm = "0.0.5" +pymupdf = "1.24.13" +markdownify = "0.12.1" +transformers = "4.41.1" +torch = "2.3.0" +tiktoken = "0.7.0" +langchain-text-splitters = "0.2.0" +ipykernel = "^6.29.5" +hatchling = "^1.25.0" +pypandoc-binary = "1.13" +tabulate = "^0.9.0" +ipywidgets = "^8.1.5" +unitycatalog-ai = {git = "https://github.com/unitycatalog/unitycatalog.git", subdirectory = "ai/core"} +unitycatalog-openai = {git = "https://github.com/unitycatalog/unitycatalog.git", subdirectory = "ai/integrations/openai"} +pytest = "^8.3.3" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/openai_sdk_agent_app_sample_code/requirements.txt b/openai_sdk_agent_app_sample_code/requirements.txt new file mode 100644 index 0000000..78ebc76 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/requirements.txt @@ -0,0 +1,11 @@ +pydantic>=2.9.2 +databricks-agents +mlflow>=2.18.0 +databricks-sdk[openai] +databricks-vectorsearch +pyyaml +git+https://github.com/unitycatalog/unitycatalog.git#subdirectory=ai/core +git+https://github.com/unitycatalog/unitycatalog.git#subdirectory=ai/integrations/openai +tabulate +pandas +databricks-connect==15.1.0 \ No newline at end of file diff --git a/openai_sdk_agent_app_sample_code/requirements_datapipeline.txt b/openai_sdk_agent_app_sample_code/requirements_datapipeline.txt new file mode 100644 index 0000000..c7d2e90 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/requirements_datapipeline.txt @@ -0,0 +1,9 @@ +pymupdf4llm==0.0.5 
+pymupdf==1.24.13 +markdownify==0.12.1 +transformers==4.41.1 +torch==2.3.0 +tiktoken==0.7.0 +langchain-text-splitters==0.2.0 +pypandoc_binary==1.13 +pyyaml \ No newline at end of file diff --git a/openai_sdk_agent_app_sample_code/tests/conftest.py b/openai_sdk_agent_app_sample_code/tests/conftest.py new file mode 100644 index 0000000..2829f71 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/tests/conftest.py @@ -0,0 +1,6 @@ +import sys +import os + +# Add the root directory to sys.path, so that we can treat directories like +# openai_sdk_agent_app_sample_code as modules +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))) diff --git a/openai_sdk_agent_app_sample_code/tests/test_file_loading.py b/openai_sdk_agent_app_sample_code/tests/test_file_loading.py new file mode 100644 index 0000000..3ed59a7 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/tests/test_file_loading.py @@ -0,0 +1,113 @@ +from datetime import datetime + +import pytest +import pyspark +import pandas as pd +from typing import TypedDict + +from utils.file_loading import load_files_to_df, apply_parsing_udf +from utils.typed_dicts_to_spark_schema import typed_dicts_to_spark_schema + + +@pytest.fixture(scope="module") +def spark(): + return ( + pyspark.sql.SparkSession.builder.master("local[1]") + .config("spark.driver.bindAddress", "127.0.0.1") + .config("spark.task.maxFailures", "1") # avoid retry failed spark tasks + .getOrCreate() + ) + + +@pytest.fixture() +def example_files_dir(tmpdir): + temp_dir = tmpdir.mkdir("files_subdir") + file_1 = temp_dir.join("file1.txt") + file_2 = temp_dir.join("file2.txt") + file_1.write("file1 content") + file_2.write("file2 content") + yield temp_dir, file_1, file_2 + + +def test_load_files_to_df(spark, example_files_dir): + temp_dir, file_1, file_2 = example_files_dir + raw_files_df = ( + load_files_to_df(spark, str(temp_dir)).drop("modificationTime").orderBy("path") + ) + assert raw_files_df.count() == 2 + raw_pandas_df = raw_files_df.toPandas() + # Decode the content from bytes to string + raw_pandas_df["content"] = raw_pandas_df["content"].apply( + lambda x: bytes(x).decode("utf-8") + ) + # Expected DataFrame + expected_df = pd.DataFrame( + [ + { + "path": f"file:{str(file_1)}", + "length": len("file1 content"), + "content": "file1 content", + }, + { + "path": f"file:{str(file_2)}", + "length": len("file2 content"), + "content": "file2 content", + }, + ] + ) + pd.testing.assert_frame_equal(raw_pandas_df, expected_df) + + +def test_load_files_to_df_throws_if_no_files(spark, tmpdir): + temp_dir = tmpdir.mkdir("files_subdir") + with pytest.raises(Exception, match="does not contain any files"): + load_files_to_df(spark, str(temp_dir)) + + +class ParserReturnValue(TypedDict): + # Parsed content of the document + content: str # do not change this name + # The status of whether the parser succeeds or fails, used to exclude failed files downstream + parser_status: str # do not change this name + # Unique ID of the document + doc_uri: str # do not change this name + + +def test_apply_parsing_udf(spark, example_files_dir): + def _mock_file_parser( + raw_doc_contents_bytes: bytes, + doc_path: str, + modification_time: datetime, + doc_bytes_length: int, + ): + return { + "content": raw_doc_contents_bytes.decode("utf-8"), + "parser_status": "SUCCESS", + "doc_uri": doc_path, + } + + temp_dir, file_1, file_2 = example_files_dir + raw_files_df = load_files_to_df(spark, str(temp_dir)).orderBy("path") + parsed_df = apply_parsing_udf( + raw_files_df, + 
_mock_file_parser, + parsed_df_schema=typed_dicts_to_spark_schema(ParserReturnValue), + ) + assert parsed_df.count() == 2 + parsed_pandas_df = parsed_df.toPandas() + # Expected DataFrame + expected_df = pd.DataFrame( + [ + { + "content": file_1.read_text(encoding="utf-8"), + "parser_status": "SUCCESS", + "doc_uri": f"file:{str(file_1)}", + }, + { + "content": file_2.read_text(encoding="utf-8"), + "parser_status": "SUCCESS", + "doc_uri": f"file:{str(file_2)}", + }, + ] + ) + pd.testing.assert_frame_equal(parsed_pandas_df, expected_df) diff --git a/openai_sdk_agent_app_sample_code/tools/README.md b/openai_sdk_agent_app_sample_code/tools/README.md new file mode 100644 index 0000000..e7acbf9 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/tools/README.md @@ -0,0 +1 @@ +Store user-created tools in this directory. \ No newline at end of file diff --git a/openai_sdk_agent_app_sample_code/tools/__init__.py b/openai_sdk_agent_app_sample_code/tools/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/openai_sdk_agent_app_sample_code/tools/code_exec.py b/openai_sdk_agent_app_sample_code/tools/code_exec.py new file mode 100644 index 0000000..633a34b --- /dev/null +++ b/openai_sdk_agent_app_sample_code/tools/code_exec.py @@ -0,0 +1,20 @@ +def python_exec(code: str) -> str: + """ + Executes Python code in the sandboxed environment and returns its stdout. The runtime is stateless and you can not read output of the previous tool executions. i.e. No such variables "rows", "observation" defined. Calling another tool inside a Python code is NOT allowed. + Use only standard python libraries and these python libraries: bleach, chardet, charset-normalizer, defusedxml, googleapis-common-protos, grpcio, grpcio-status, jmespath, joblib, numpy, packaging, pandas, patsy, protobuf, pyarrow, pyparsing, python-dateutil, pytz, scikit-learn, scipy, setuptools, six, threadpoolctl, webencodings, user-agents, cryptography. + + Args: + code (str): Python code to execute. Remember to print the final result to stdout. + + Returns: + str: The output of the executed code. + """ + import sys + from io import StringIO + + sys_stdout = sys.stdout + redirected_output = StringIO() + sys.stdout = redirected_output + exec(code) + sys.stdout = sys_stdout + return redirected_output.getvalue() diff --git a/openai_sdk_agent_app_sample_code/tools/sample_tool.py b/openai_sdk_agent_app_sample_code/tools/sample_tool.py new file mode 100644 index 0000000..eef313c --- /dev/null +++ b/openai_sdk_agent_app_sample_code/tools/sample_tool.py @@ -0,0 +1,46 @@ + +def sku_sample_translator(old_sku: str) -> str: + """ + Translates a pre-2024 SKU formatted as "OLD-XXX-YYYY" to the new SKU format "NEW-YYYY-XXX". + + Args: + old_sku (str): The old SKU in the format "OLD-XXX-YYYY". + + Returns: + str: The new SKU in the format "NEW-YYYY-XXX". + + Raises: + ValueError: If the SKU format is invalid, providing specific error details. 
+ """ + import re + + if not isinstance(old_sku, str): + raise ValueError("SKU must be a string") + + # Normalize input by removing extra whitespace and converting to uppercase + old_sku = old_sku.strip().upper() + + # Define the regex pattern for the old SKU format + pattern = r"^OLD-([A-Z]{3})-(\d{4})$" + + # Match the old SKU against the pattern + match = re.match(pattern, old_sku) + if not match: + if not old_sku.startswith("OLD-"): + raise ValueError("SKU must start with 'OLD-'") + if not re.match(r"^OLD-[A-Z]{3}-\d{4}$", old_sku): + raise ValueError( + "SKU format must be 'OLD-XXX-YYYY' where X is a letter and Y is a digit" + ) + raise ValueError("Invalid SKU format") + + # Extract the letter code and numeric part + letter_code, numeric_part = match.groups() + + # Additional validation for numeric part + if not (1 <= int(numeric_part) <= 9999): + raise ValueError("Numeric part must be between 0001 and 9999") + + # Construct the new SKU + new_sku = f"NEW-{numeric_part}-{letter_code}" + return new_sku diff --git a/openai_sdk_agent_app_sample_code/tools/test_code_exec.py b/openai_sdk_agent_app_sample_code/tools/test_code_exec.py new file mode 100644 index 0000000..081b5bf --- /dev/null +++ b/openai_sdk_agent_app_sample_code/tools/test_code_exec.py @@ -0,0 +1,89 @@ +import pytest +from .code_exec import python_exec + + +def test_basic_arithmetic(): + code = """result = 2 + 2\nprint(result)""" + assert python_exec(code).strip() == "4" + + +def test_multiple_lines(): + code = "x = 5\n" "y = 3\n" "result = x * y\n" "print(result)" + assert python_exec(code).strip() == "15" + + +def test_multiple_prints(): + code = """print('first')\nprint('second')\nprint('third')\n""" + expected = "first\nsecond\nthird\n" + assert python_exec(code) == expected + + +def test_using_pandas(): + code = ( + "import pandas as pd\n" + "data = {'col1': [1, 2], 'col2': [3, 4]}\n" + "df = pd.DataFrame(data)\n" + "print(df.shape)" + ) + assert python_exec(code).strip() == "(2, 2)" + + +def test_using_numpy(): + code = "import numpy as np\n" "arr = np.array([1, 2, 3])\n" "print(arr.mean())" + assert python_exec(code).strip() == "2.0" + + +def test_syntax_error(): + code = "if True\n" " print('invalid syntax')" + with pytest.raises(SyntaxError): + python_exec(code) + + +def test_runtime_error(): + code = "x = 1 / 0\n" "print(x)" + with pytest.raises(ZeroDivisionError): + python_exec(code) + + +def test_undefined_variable(): + code = "print(undefined_variable)" + with pytest.raises(NameError): + python_exec(code) + + +def test_multiline_string_manipulation(): + code = "text = '''\n" "Hello\n" "World\n" "'''\n" "print(text.strip())" + expected = "Hello\nWorld" + assert python_exec(code).strip() == expected + + +# Will not fail locally, but will fail in UC. 
+# def test_unauthorized_flaskxx(): +# code = "from flask import Flask\n" "app = Flask(__name__)\n" "print(app)" +# with pytest.raises(ImportError): +# python_exec(code) + + +def test_no_print_statement(): + code = "x = 42\n" "y = x * 2" + assert python_exec(code) == "" + + +def test_calculation_without_print(): + code = "result = sum([1, 2, 3, 4, 5])\n" "squared = [x**2 for x in range(5)]" + assert python_exec(code) == "" + + +def test_function_definition_without_call(): + code = "def add(a, b):\n" " return a + b\n" "result = add(3, 4)" + assert python_exec(code) == "" + + +def test_class_definition_without_instantiation(): + code = ( + "class Calculator:\n" + " def add(self, a, b):\n" + " return a + b\n" + "calc = Calculator()" + ) + assert python_exec(code) == "" diff --git a/openai_sdk_agent_app_sample_code/tools/test_code_exec_as_uc_tool.py b/openai_sdk_agent_app_sample_code/tools/test_code_exec_as_uc_tool.py new file mode 100644 index 0000000..cd51720 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/tools/test_code_exec_as_uc_tool.py @@ -0,0 +1,101 @@ +import pytest +from cookbook.tools.uc_tool import UCTool + +CATALOG = "ep" +SCHEMA = "cookbook_local_test" + + +@pytest.fixture +def python_exec(): + """Fixture to provide the python_exec function from UCTool.""" + python_exec_tool = UCTool(uc_function_name=f"{CATALOG}.{SCHEMA}.python_exec") + return python_exec_tool + + +def test_basic_arithmetic(python_exec): + code = """result = 2 + 2\nprint(result)""" + assert python_exec(code=code)["value"].strip() == "4" + + +def test_multiple_lines(python_exec): + code = "x = 5\n" "y = 3\n" "result = x * y\n" "print(result)" + assert python_exec(code=code)["value"].strip() == "15" + + +def test_multiple_prints(python_exec): + code = """print('first')\nprint('second')\nprint('third')\n""" + expected = "first\nsecond\nthird\n" + assert python_exec(code=code)["value"] == expected + + +def test_using_pandas(python_exec): + code = ( + "import pandas as pd\n" + "data = {'col1': [1, 2], 'col2': [3, 4]}\n" + "df = pd.DataFrame(data)\n" + "print(df.shape)" + ) + assert python_exec(code=code)["value"].strip() == "(2, 2)" + + +def test_using_numpy(python_exec): + code = "import numpy as np\n" "arr = np.array([1, 2, 3])\n" "print(arr.mean())" + assert python_exec(code=code)["value"].strip() == "2.0" + + +def test_syntax_error(python_exec): + code = "if True\n" " print('invalid syntax')" + result = python_exec(code=code) + assert "Syntax error at or near 'invalid'." 
in result["error"]["error_message"] + + +def test_runtime_error(python_exec): + code = "x = 1 / 0\n" "print(x)" + result = python_exec(code=code) + assert "ZeroDivisionError" in result["error"]["error_message"] + + +def test_undefined_variable(python_exec): + code = "print(undefined_variable)" + result = python_exec(code=code) + assert "NameError" in result["error"]["error_message"] + + +def test_multiline_string_manipulation(python_exec): + code = "text = '''\n" "Hello\n" "World\n" "'''\n" "print(text.strip())" + expected = "Hello\nWorld" + assert python_exec(code=code)["value"].strip() == expected + + +def test_unauthorized_flask(python_exec): + code = "from flask import Flask\n" "app = Flask(__name__)\n" "print(app)" + result = python_exec(code=code) + assert ( + "ModuleNotFoundError: No module named 'flask'" + in result["error"]["error_message"] + ) + + +def test_no_print_statement(python_exec): + code = "x = 42\n" "y = x * 2" + assert python_exec(code=code)["value"] == "" + + +def test_calculation_without_print(python_exec): + code = "result = sum([1, 2, 3, 4, 5])\n" "squared = [x**2 for x in range(5)]" + assert python_exec(code=code)["value"] == "" + + +def test_function_definition_without_call(python_exec): + code = "def add(a, b):\n" " return a + b\n" "result = add(3, 4)" + assert python_exec(code=code)["value"] == "" + + +def test_class_definition_without_instantiation(python_exec): + code = ( + "class Calculator:\n" + " def add(self, a, b):\n" + " return a + b\n" + "calc = Calculator()" + ) + assert python_exec(code=code)["value"] == "" diff --git a/openai_sdk_agent_app_sample_code/tools/test_sample_tool.py b/openai_sdk_agent_app_sample_code/tools/test_sample_tool.py new file mode 100644 index 0000000..f818d70 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/tools/test_sample_tool.py @@ -0,0 +1,52 @@ +import pytest +from tools.sample_tool import sku_sample_translator + + + +def test_valid_sku_translation(): + """Test successful SKU translation with valid input.""" + assert sku_sample_translator("OLD-ABC-1234") == "NEW-1234-ABC" + assert sku_sample_translator("OLD-XYZ-0001") == "NEW-0001-XYZ" + assert sku_sample_translator("old-def-5678") == "NEW-5678-DEF" # Test case insensitivity + + +def test_whitespace_handling(): + """Test that the function handles extra whitespace correctly.""" + assert sku_sample_translator(" OLD-ABC-1234 ") == "NEW-1234-ABC" + assert sku_sample_translator("\tOLD-ABC-1234\n") == "NEW-1234-ABC" + + +def test_invalid_input_type(): + """Test that non-string inputs raise ValueError.""" + with pytest.raises(ValueError, match="SKU must be a string"): + sku_sample_translator(123) + with pytest.raises(ValueError, match="SKU must be a string"): + sku_sample_translator(None) + + +def test_invalid_prefix(): + """Test that SKUs not starting with 'OLD-' raise ValueError.""" + with pytest.raises(ValueError, match="SKU must start with 'OLD-'"): + sku_sample_translator("NEW-ABC-1234") + with pytest.raises(ValueError, match="SKU must start with 'OLD-'"): + sku_sample_translator("XXX-ABC-1234") + + +def test_invalid_format(): + """Test various invalid SKU formats.""" + invalid_skus = [ + "OLD-AB-1234", # Too few letters + "OLD-ABCD-1234", # Too many letters + "OLD-123-1234", # Numbers instead of letters + "OLD-ABC-123", # Too few digits + "OLD-ABC-12345", # Too many digits + "OLD-ABC-XXXX", # Letters instead of numbers + "OLD-A1C-1234", # Mixed letters and numbers in middle + ] + + for sku in invalid_skus: + with pytest.raises( + ValueError, + match="SKU format must be 
'OLD-XXX-YYYY' where X is a letter and Y is a digit", + ): + sku_sample_translator(sku) diff --git a/openai_sdk_agent_app_sample_code/tools/test_sample_tool_uc.py b/openai_sdk_agent_app_sample_code/tools/test_sample_tool_uc.py new file mode 100644 index 0000000..1539f11 --- /dev/null +++ b/openai_sdk_agent_app_sample_code/tools/test_sample_tool_uc.py @@ -0,0 +1,72 @@ +import pytest +from cookbook.tools.uc_tool import UCTool + +# Load the function via UCTool rather than importing it locally +@pytest.fixture +def uc_tool(): + """Fixture to translate a UC tool into a local function.""" + UC_FUNCTION_NAME = "ep.cookbook_local_test.sku_sample_translator" + loaded_tool = UCTool(uc_function_name=UC_FUNCTION_NAME) + return loaded_tool + + +# Note: The value will be post processed into the `value` key, so we must check the returned value there. +def test_valid_sku_translation(uc_tool): + """Test successful SKU translation with valid input.""" + assert uc_tool(old_sku="OLD-ABC-1234")["value"] == "NEW-1234-ABC" + assert uc_tool(old_sku="OLD-XYZ-0001")["value"] == "NEW-0001-XYZ" + assert ( + uc_tool(old_sku="old-def-5678")["value"] == "NEW-5678-DEF" + ) # Test case insensitivity + + +# Note: The value will be post processed into the `value` key, so we must check the returned value there. +def test_whitespace_handling(uc_tool): + """Test that the function handles extra whitespace correctly.""" + assert uc_tool(old_sku=" OLD-ABC-1234 ")["value"] == "NEW-1234-ABC" + assert uc_tool(old_sku="\tOLD-ABC-1234\n")["value"] == "NEW-1234-ABC" + + +# Note: the input validation happens BEFORE the function is called by Spark, so we will never get these exceptions from the function. +# Instead, we will get invalid parameters errors from Spark. +def test_invalid_input_type(uc_tool): + """Test that non-string inputs are rejected by Spark's parameter validation.""" + assert ( + uc_tool(old_sku=123)["error"]["error_message"] + == """Invalid parameters provided: {'old_sku': "Parameter old_sku should be of type STRING (corresponding python type <class 'str'>), but got <class 'int'>"}.""" + ) + assert ( + uc_tool(old_sku=None)["error"]["error_message"] + == """Invalid parameters provided: {'old_sku': "Parameter old_sku should be of type STRING (corresponding python type <class 'str'>), but got <class 'NoneType'>"}.""" + ) + + +# Note: The errors will be post processed into the `error_message` key inside the `error` top level key, so we must check for exceptions there. +def test_invalid_prefix(uc_tool): + """Test that SKUs not starting with 'OLD-' raise ValueError.""" + assert ( + uc_tool(old_sku="NEW-ABC-1234")["error"]["error_message"] + == "ValueError: SKU must start with 'OLD-'" + ) + assert ( + uc_tool(old_sku="XXX-ABC-1234")["error"]["error_message"] + == "ValueError: SKU must start with 'OLD-'" + ) + + +# Note: The errors will be post processed into the `error_message` key inside the `error` top level key, so we must check for exceptions there. +def test_invalid_format(uc_tool): + """Test various invalid SKU formats.""" + invalid_skus = [ + "OLD-AB-1234", # Too few letters + "OLD-ABCD-1234", # Too many letters + "OLD-123-1234", # Numbers instead of letters + "OLD-ABC-123", # Too few digits + "OLD-ABC-12345", # Too many digits + "OLD-ABC-XXXX", # Letters instead of numbers + "OLD-A1C-1234", # Mixed letters and numbers in middle + ] + + expected_error = "ValueError: SKU format must be 'OLD-XXX-YYYY' where X is a letter and Y is a digit" + for sku in invalid_skus: + assert uc_tool(old_sku=sku)["error"]["error_message"] == expected_error
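
To make the expected behavior of the two local tools concrete, here is a minimal usage sketch. It is illustrative only (not part of the diff) and assumes the repository root is on sys.path, as tests/conftest.py arranges:

from tools.code_exec import python_exec
from tools.sample_tool import sku_sample_translator

# python_exec returns whatever the executed code prints to stdout; code that
# prints nothing yields an empty string, and runtime errors propagate as exceptions.
assert python_exec("print(2 + 2)").strip() == "4"
assert python_exec("x = 42") == ""

# sku_sample_translator normalizes whitespace and case, validates the
# "OLD-XXX-YYYY" shape, then swaps the letter and digit groups.
assert sku_sample_translator(" old-abc-1234 ") == "NEW-1234-ABC"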
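
The UC-backed tests above also show how UCTool reshapes results: successful output is wrapped under a `value` key, and failures (including exceptions raised inside the UC function) land under `error.error_message`. A sketch under the same assumptions, using the `ep.cookbook_local_test` catalog/schema hard-coded in the tests (yours will differ):

from cookbook.tools.uc_tool import UCTool

# Wrap the UC-registered copy of the SKU translator; the function name below is
# the one used by the tests and is workspace-specific.
translator = UCTool(uc_function_name="ep.cookbook_local_test.sku_sample_translator")

ok = translator(old_sku="OLD-ABC-1234")
assert ok["value"] == "NEW-1234-ABC"  # success -> "value" key

bad = translator(old_sku="NEW-ABC-1234")
# failure -> "error" key with the stringified exception message
assert bad["error"]["error_message"] == "ValueError: SKU must start with 'OLD-'"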