From 96b1bf84aae523bc0009464cf19341985480e7a0 Mon Sep 17 00:00:00 2001 From: Dttbd Date: Tue, 23 Apr 2024 21:13:00 +0800 Subject: [PATCH] feat: add upload file api --- .env | 7 - .gitignore | 144 ++++++- examples/crud/assistant_crud.ipynb | 122 +++--- examples/crud/retrieval_crud.ipynb | 385 ++++++++++++------ requirements.txt | 6 +- taskingai/assistant/assistant.py | 36 +- taskingai/assistant/chat.py | 16 +- taskingai/client/models/entities/__init__.py | 2 + taskingai/client/models/entities/chat.py | 1 + .../client/models/entities/file_id_data.py | 21 + taskingai/client/models/entities/message.py | 3 +- .../client/models/entities/record_type.py | 2 + .../models/entities/upload_file_purpose.py | 20 + taskingai/client/models/schemas/__init__.py | 1 + .../models/schemas/chat_completion_request.py | 4 +- .../models/schemas/chat_create_request.py | 1 + .../models/schemas/chat_update_request.py | 1 + .../models/schemas/record_create_request.py | 6 +- .../models/schemas/record_update_request.py | 2 + .../models/schemas/upload_file_response.py | 22 + taskingai/file/__init__.py | 1 + taskingai/file/api.py | 58 +++ taskingai/file/file.py | 37 ++ taskingai/file/utils.py | 59 +++ taskingai/retrieval/record.py | 110 +++-- taskingai/tool/action.py | 42 +- test/common/utils.py | 15 +- test/config.py | 16 +- test/files/test.docx | Bin 0 -> 7725 bytes test/files/test.html | 11 + test/files/test.md | 2 + test/files/test.pdf | Bin 0 -> 23689 bytes test/files/test.txt | 1 + test/run_test.sh | 3 +- .../test_async/test_async_assistant.py | 367 +++++++++++++++-- .../test_async/test_async_inference.py | 69 +++- .../test_async/test_async_retrieval.py | 263 ++++++++---- test/testcase/test_async/test_async_tool.py | 127 +++--- .../testcase/test_sync/test_sync_assistant.py | 360 ++++++++++++++-- .../testcase/test_sync/test_sync_inference.py | 69 +++- .../testcase/test_sync/test_sync_retrieval.py | 152 ++++++- test/testcase/test_sync/test_sync_tool.py | 132 +++--- test_requirements.txt | 8 +- 43 files changed, 2156 insertions(+), 548 deletions(-) delete mode 100644 .env create mode 100644 taskingai/client/models/entities/file_id_data.py create mode 100644 taskingai/client/models/entities/upload_file_purpose.py create mode 100644 taskingai/client/models/schemas/upload_file_response.py create mode 100644 taskingai/file/__init__.py create mode 100644 taskingai/file/api.py create mode 100644 taskingai/file/file.py create mode 100644 taskingai/file/utils.py create mode 100644 test/files/test.docx create mode 100644 test/files/test.html create mode 100644 test/files/test.md create mode 100644 test/files/test.pdf create mode 100644 test/files/test.txt diff --git a/.env b/.env deleted file mode 100644 index dcbcd97..0000000 --- a/.env +++ /dev/null @@ -1,7 +0,0 @@ -CHAT_COMPLETION_MODEL_ID=TpHmCB8s - -TASKINGAI_HOST=https://api.test199.com - -TEXT_EMBEDDING_MODEL_ID=TpEZlEOK - -TASKINGAI_API_KEY=taxy8i3OCfeJfh0eXW0h00cF2QT7nWyy \ No newline at end of file diff --git a/.gitignore b/.gitignore index 6af4b0d..de27eeb 100644 --- a/.gitignore +++ b/.gitignore @@ -8,7 +8,6 @@ __pycache__/ # Distribution / packaging .Python -env/ build/ develop-eggs/ dist/ @@ -20,9 +19,12 @@ lib64/ parts/ sdist/ var/ +wheels/ +share/python-wheels/ *.egg-info/ .installed.cfg *.egg +MANIFEST # PyInstaller # Usually these files are written by a python script from a template @@ -37,15 +39,17 @@ pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ +.nox/ .coverage .coverage.* .cache nosetests.xml coverage.xml -*,cover +*.cover 
+*.py,cover .hypothesis/ -venv/ -.python-version +.pytest_cache/ +cover/ # Translations *.mo @@ -53,23 +57,143 @@ venv/ # Django stuff: *.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy # Sphinx documentation docs/_build/ # PyBuilder +.pybuilder/ target/ -#Ipython Notebook +# Jupyter Notebook .ipynb_checkpoints +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintainted in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
.idea/ +### macOS ### +# General .DS_Store -.venv +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +### macOS Patch ### +# iCloud generated files +*.icloud + +### VisualStudioCode ### +.vscode/* + +# Local History for Visual Studio Code +.history/ -# test -test/.pytest_cache/ -test/log/ +# Built Visual Studio Code Extensions +*.vsix -**/allure-report +### VisualStudioCode Patch ### +# Ignore all local history of files +.history +.ionide diff --git a/examples/crud/assistant_crud.ipynb b/examples/crud/assistant_crud.ipynb index 8a65b90..0a58e67 100644 --- a/examples/crud/assistant_crud.ipynb +++ b/examples/crud/assistant_crud.ipynb @@ -12,12 +12,12 @@ }, { "cell_type": "markdown", - "source": [ - "# TaskingAI Assistant Module CRUD Example" - ], "metadata": { "collapsed": false - } + }, + "source": [ + "# TaskingAI Assistant Module CRUD Example" + ] }, { "cell_type": "code", @@ -25,7 +25,7 @@ "metadata": {}, "outputs": [], "source": [ - "from taskingai.models import Assistant, Chat\n", + "from taskingai.assistant import Assistant, Chat\n", "from taskingai.assistant.memory import AssistantNaiveMemory\n", "\n", "# choose an available chat_completion model from your project\n", @@ -34,12 +34,12 @@ }, { "cell_type": "markdown", - "source": [ - "## Assistant Object" - ], "metadata": { "collapsed": false - } + }, + "source": [ + "## Assistant Object" + ] }, { "cell_type": "code", @@ -47,6 +47,8 @@ "metadata": {}, "outputs": [], "source": [ + "from taskingai.assistant import RetrievalConfig, RetrievalMethod\n", + "\n", "# create an assistant\n", "def create_assistant() -> Assistant:\n", " assistant: Assistant = taskingai.assistant.create_assistant(\n", @@ -57,6 +59,7 @@ " memory=AssistantNaiveMemory(),\n", " tools=[],\n", " retrievals=[],\n", + " retrieval_configs=RetrievalConfig(top_k=3, max_tokens=4096, method=RetrievalMethod.USER_MESSAGE),\n", " metadata={\"foo\": \"bar\"},\n", " )\n", " return assistant\n", @@ -68,6 +71,9 @@ { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# get assistant\n", @@ -77,14 +83,14 @@ ")\n", "\n", "print(f\"got assistant: {assistant}\\n\")" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# update assistant\n", @@ -92,30 +98,31 @@ " assistant_id=assistant_id,\n", " name=\"My New Assistant\",\n", " description=\"This is my new assistant\",\n", + " retrieval_configs=RetrievalConfig(top_k=4, max_tokens=8192, method=RetrievalMethod.USER_MESSAGE),\n", ")\n", "\n", "print(f\"updated assistant: {assistant}\\n\")\n" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# delete assistant\n", "taskingai.assistant.delete_assistant(assistant_id=assistant_id)\n", "print(f\"deleted assistant: {assistant_id}\\n\")" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# list assistants\n", @@ -123,23 +130,23 @@ 
"assistant_ids = [assistant.assistant_id for assistant in assistants]\n", "# ensure the assistant we deleted is not in the list\n", "print(f\"f{assistant_id} in assistant_ids: {assistant_id in assistant_ids}\\n\")" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", - "source": [ - "## Chat Object" - ], "metadata": { "collapsed": false - } + }, + "source": [ + "## Chat Object" + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# create a new assistant\n", @@ -148,16 +155,17 @@ "# create a chat\n", "chat: Chat = taskingai.assistant.create_chat(\n", " assistant_id=assistant.assistant_id,\n", + " name=\"my chat\",\n", ")\n", "print(f\"created chat: {chat.chat_id} for assistant: {assistant.assistant_id}\\n\")" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# get chat\n", @@ -167,31 +175,32 @@ " chat_id=chat_id,\n", ")\n", "print(f\"chat: {chat}\\n\")" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# update chat\n", "chat: Chat = taskingai.assistant.update_chat(\n", " assistant_id=assistant.assistant_id,\n", " chat_id=chat_id,\n", + " name=\"my chat new name\",\n", " metadata={\"foo\": \"bar\"},\n", ")\n", "print(f\"updated chat: {chat}\\n\")" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# delete chat\n", @@ -200,14 +209,14 @@ " chat_id=chat_id,\n", ")\n", "print(f\"deleted chat: {chat_id}\\n\")" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# list chats \n", @@ -220,32 +229,29 @@ " assistant_id=assistant.assistant_id,\n", ")\n", "print(f\"num chats = {len(chats)}\\n\")" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# delete assistant\n", "taskingai.assistant.delete_assistant(assistant_id=assistant.assistant_id)" - ], - "metadata": { - "collapsed": false - } + ] } ], "metadata": { - "language_info": { - "name": "python" - }, "kernelspec": { - "name": "python3", + "display_name": "Python 3 (ipykernel)", "language": "python", - "display_name": "Python 3 (ipykernel)" + "name": "python3" + }, + "language_info": { + "name": "python" } }, "nbformat": 4, diff --git a/examples/crud/retrieval_crud.ipynb b/examples/crud/retrieval_crud.ipynb index 8cd13fc..0ab9cd6 100644 --- a/examples/crud/retrieval_crud.ipynb +++ b/examples/crud/retrieval_crud.ipynb @@ -15,55 +15,59 @@ }, { "cell_type": "markdown", - "source": [ - "# TaskingAI Retrieval Module CRUD Example" - ], + "id": "40014270c97e4463", "metadata": { "collapsed": false }, - "id": "40014270c97e4463" + "source": [ + "# TaskingAI Retrieval Module CRUD Example" + ] }, { "cell_type": "code", "execution_count": null, + "id": "b7b7f8d3b36c0126", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "from taskingai.retrieval import Collection, Record, Chunk, TokenTextSplitter\n", "\n", "# choose an available text_embedding model from your project\n", "embedding_model_id = \"YOUR_EMBEDDING_MODEL_ID\"" - ], - "metadata": { - 
"collapsed": false - }, - "id": "b7b7f8d3b36c0126" + ] }, { "cell_type": "markdown", - "source": [ - "## Collection Object" - ], + "id": "a6874f1ff8ec5a9c", "metadata": { "collapsed": false }, - "id": "a6874f1ff8ec5a9c" + "source": [ + "## Collection Object" + ] }, { "cell_type": "code", "execution_count": null, + "id": "81ec82280d5c8c64", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "collections = taskingai.retrieval.list_collections()\n", "print(collections)" - ], - "metadata": { - "collapsed": false - }, - "id": "81ec82280d5c8c64" + ] }, { "cell_type": "code", "execution_count": null, + "id": "ca5934605bd0adf8", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# create a collection\n", @@ -76,15 +80,15 @@ "\n", "collection: Collection = create_collection()\n", "print(f\"created collection: {collection}\")" - ], - "metadata": { - "collapsed": false - }, - "id": "ca5934605bd0adf8" + ] }, { "cell_type": "code", "execution_count": null, + "id": "491c0ffe91ac524b", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# get collection\n", @@ -94,15 +98,15 @@ ")\n", "\n", "print(f\"collection: {collection}\\n\")" - ], - "metadata": { - "collapsed": false - }, - "id": "491c0ffe91ac524b" + ] }, { "cell_type": "code", "execution_count": null, + "id": "11e1c69e34d544a7", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# update collection\n", @@ -112,29 +116,29 @@ ")\n", "\n", "print(f\"updated collection: {collection}\\n\")\n" - ], - "metadata": { - "collapsed": false - }, - "id": "11e1c69e34d544a7" + ] }, { "cell_type": "code", "execution_count": null, + "id": "e65087e786df1b14", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# delete collection\n", "taskingai.retrieval.delete_collection(collection_id=collection_id)\n", "print(f\"deleted collection: {collection_id}\\n\")" - ], - "metadata": { - "collapsed": false - }, - "id": "e65087e786df1b14" + ] }, { "cell_type": "code", "execution_count": null, + "id": "c8f8cf1c5ec5f069", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# list collections\n", @@ -142,128 +146,228 @@ "collection_ids = [collection.collection_id for collection in collections]\n", "# ensure the collection we deleted is not in the list\n", "print(f\"f{collection_id} in collection_ids: {collection_id in collection_ids}\\n\")" - ], - "metadata": { - "collapsed": false - }, - "id": "c8f8cf1c5ec5f069" + ] }, { "cell_type": "markdown", - "source": [ - "## Record Object" - ], + "id": "1b7688a3cf40c241", "metadata": { "collapsed": false }, - "id": "1b7688a3cf40c241" + "source": [ + "## Record Object" + ] }, { "cell_type": "code", "execution_count": null, + "id": "f1107f5ac4cb27b9", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# create a new collection\n", "collection: Collection = create_collection()\n", "print(collection)" - ], - "metadata": { - "collapsed": false - }, - "id": "f1107f5ac4cb27b9" + ] + }, + { + "cell_type": "markdown", + "id": "49ce1a09", + "metadata": {}, + "source": [ + "### Text Record" + ] }, { "cell_type": "code", "execution_count": null, + "id": "87bab2ace805b8ef", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# create a new text record\n", "record: Record = taskingai.retrieval.create_record(\n", " collection_id=collection.collection_id,\n", + " type=\"text\",\n", + " title=\"Machine learning\",\n", " content=\"Machine learning is a subfield of artificial intelligence (AI) that involves the 
development of algorithms that allow computers to learn from and make decisions or predictions based on data. The term \\\"machine learning\\\" was coined by Arthur Samuel in 1959. In other words, machine learning enables a system to automatically learn and improve from experience without being explicitly programmed. This is achieved by feeding the system massive amounts of data, which it uses to learn patterns and make inferences. There are three main types of machine learning: 1. Supervised Learning: This is where the model is given labeled training data and the goal of learning is to generalize from the training data to unseen situations in a principled way. 2. Unsupervised Learning: This involves training on a dataset without explicit labels. The goal might be to discover inherent groupings or patterns within the data. 3. Reinforcement Learning: In this type, an agent learns to perform actions based on reward/penalty feedback to achieve a goal. It's commonly used in robotics, gaming, and navigation. Deep learning, a subset of machine learning, uses neural networks with many layers (\\\"deep\\\" structures) and has been responsible for many recent breakthroughs in AI, including speech recognition, image recognition, and natural language processing. It's important to note that machine learning is a rapidly developing field, with new techniques and applications emerging regularly.\",\n", - " text_splitter=TokenTextSplitter(chunk_size=200, chunk_overlap=20)\n", + " text_splitter={\"type\": \"token\", \"chunk_size\": 200, \"chunk_overlap\": 20}\n", ")\n", "print(f\"created record: {record.record_id} for collection: {collection.collection_id}\\n\")" - ], + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4369989d2bd1a777", "metadata": { "collapsed": false }, - "id": "87bab2ace805b8ef" + "outputs": [], + "source": [ + "# update record - content\n", + "record = taskingai.retrieval.update_record(\n", + " record_id=record.record_id,\n", + " collection_id=collection.collection_id,\n", + " type=\"text\",\n", + " title=\"New title\",\n", + " content=\"New content\",\n", + " text_splitter={\"type\": \"token\", \"chunk_size\": 100, \"chunk_overlap\": 20},\n", + ")\n", + "print(f\"updated record: {record}\")" + ] + }, + { + "cell_type": "markdown", + "id": "51527a19", + "metadata": {}, + "source": [ + "### Web Record" + ] }, { "cell_type": "code", "execution_count": null, + "id": "678df05a", + "metadata": {}, "outputs": [], "source": [ - "# get text record\n", - "record = taskingai.retrieval.get_record(\n", + "# create a new web record\n", + "record: Record = taskingai.retrieval.create_record(\n", " collection_id=collection.collection_id,\n", - " record_id=record.record_id\n", + " type=\"web\",\n", + " title=\"Machine learning\",\n", + " url=\"https://www.tasking.ai\", # must https\n", + " text_splitter={\"type\": \"token\", \"chunk_size\": 200, \"chunk_overlap\": 20},\n", ")\n", - "print(f\"got record: {record}\\n\")" - ], - "metadata": { - "collapsed": false - }, - "id": "2dba1ef4650bd5cc" + "print(f\"created record: {record.record_id} for collection: {collection.collection_id}\\n\")" + ] }, { "cell_type": "code", "execution_count": null, + "id": "74fad2e5", + "metadata": {}, "outputs": [], "source": [ - "# update record - metadata\n", + "# update record - url\n", "record = taskingai.retrieval.update_record(\n", " collection_id=collection.collection_id,\n", " record_id=record.record_id,\n", - " metadata={\"foo\": \"bar\"},\n", + " type=\"web\",\n", + " 
url=\"https://docs.tasking.ai\",\n", + " text_splitter={\"type\": \"token\", \"chunk_size\": 200, \"chunk_overlap\": 20},\n", ")\n", "print(f\"updated record: {record}\")" - ], - "metadata": { - "collapsed": false - }, - "id": "65d833b22e1e657" + ] + }, + { + "cell_type": "markdown", + "id": "ab6a2f62", + "metadata": {}, + "source": [ + "### File Record" + ] }, { "cell_type": "code", "execution_count": null, + "id": "8ba29fc6", + "metadata": {}, "outputs": [], "source": [ - "# update record - content\n", + "# upload a file first\n", + "from taskingai.file import upload_file\n", + "\n", + "file = upload_file(file=\"your file path\", purpose=\"record_file\")\n", + "# or\n", + "# file = upload_file(file=open(\"your file path\", \"rb\"), purpose=\"record_file\")\n", + "print(f\"uploaded file id: {file.file_id}\")\n", + "\n", + "# create a new web record\n", + "record: Record = taskingai.retrieval.create_record(\n", + " collection_id=collection.collection_id,\n", + " type=\"file\",\n", + " title=\"Machine learning\",\n", + " file_id=file.file_id,\n", + " text_splitter={\"type\": \"token\", \"chunk_size\": 200, \"chunk_overlap\": 20},\n", + ")\n", + "print(f\"created record: {record.record_id} for collection: {collection.collection_id}\\n\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "07b449bf", + "metadata": {}, + "outputs": [], + "source": [ + "new_file = upload_file(file=\"new_file_path\", purpose=\"record_file\")\n", + "print(f\"new uploaded file id: {new_file.file_id}\")\n", + "\n", + "# update record - file\n", "record = taskingai.retrieval.update_record(\n", " collection_id=collection.collection_id,\n", " record_id=record.record_id,\n", - " content=\"New content\",\n", - " text_splitter=TokenTextSplitter(chunk_size=100, chunk_overlap=20),\n", + " type=\"file\",\n", + " file_id=new_file.file_id,\n", + " text_splitter={\"type\": \"token\", \"chunk_size\": 200, \"chunk_overlap\": 20},\n", ")\n", "print(f\"updated record: {record}\")" - ], + ] + }, + { + "cell_type": "markdown", + "id": "15465ad8", + "metadata": {}, + "source": [ + "### Record Other Cases" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "65d833b22e1e657", "metadata": { "collapsed": false }, - "id": "4369989d2bd1a777" + "outputs": [], + "source": [ + "# update record - metadata\n", + "record = taskingai.retrieval.update_record(\n", + " collection_id=collection.collection_id,\n", + " record_id=record.record_id,\n", + " metadata={\"foo\": \"bar\"},\n", + ")\n", + "print(f\"updated record: {record}\")" + ] }, { "cell_type": "code", "execution_count": null, + "id": "37f19821", + "metadata": {}, "outputs": [], "source": [ - "# delete record\n", - "taskingai.retrieval.delete_record(\n", + "# get text record\n", + "record = taskingai.retrieval.get_record(\n", " collection_id=collection.collection_id,\n", - " record_id=record.record_id,\n", + " record_id=record.record_id\n", ")\n", - "print(f\"deleted record {record.record_id} from collection {collection.collection_id}\\n\")" - ], - "metadata": { - "collapsed": false - }, - "id": "d00ac0cbfb491116" + "print(f\"got record: {record}\\n\")" + ] }, { "cell_type": "code", "execution_count": null, + "id": "accf6d883fcffaa8", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# list records\n", @@ -271,25 +375,42 @@ "record_ids = [record.record_id for record in records]\n", "# ensure the collection we deleted is not in the list\n", "print(f\"f{record.record_id} in record_ids: {record.record_id in record_ids}\\n\")" - ], + 
] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d00ac0cbfb491116", "metadata": { "collapsed": false }, - "id": "accf6d883fcffaa8" + "outputs": [], + "source": [ + "# delete record\n", + "taskingai.retrieval.delete_record(\n", + " collection_id=collection.collection_id,\n", + " record_id=record.record_id,\n", + ")\n", + "print(f\"deleted record {record.record_id} from collection {collection.collection_id}\\n\")" + ] }, { "cell_type": "markdown", - "source": [ - "## Chunk Object" - ], + "id": "b0e4c12fb7509fea", "metadata": { "collapsed": false }, - "id": "b0e4c12fb7509fea" + "source": [ + "## Chunk Object" + ] }, { "cell_type": "code", "execution_count": null, + "id": "a395337f136500fc", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# create a new text record\n", @@ -298,15 +419,15 @@ " content=\"The dog is a domesticated descendant of the wolf. Also called the domestic dog, it is derived from extinct gray wolves, and the gray wolf is the dog's closest living relative. The dog was the first species to be domesticated by humans.\",\n", ")\n", "print(f\"created chunk: {chunk.chunk_id} for collection: {collection.collection_id}\\n\")" - ], - "metadata": { - "collapsed": false - }, - "id": "a395337f136500fc" + ] }, { "cell_type": "code", "execution_count": null, + "id": "309e1771251bb079", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# update chunk metadata\n", @@ -316,15 +437,15 @@ " metadata={\"k\": \"v\"},\n", ")\n", "print(f\"updated chunk: {chunk}\")" - ], - "metadata": { - "collapsed": false - }, - "id": "309e1771251bb079" + ] }, { "cell_type": "code", "execution_count": null, + "id": "a9d68db12329b558", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# update chunk content\n", @@ -334,15 +455,15 @@ " content=\"New content\",\n", ")\n", "print(f\"updated chunk: {chunk}\")" - ], - "metadata": { - "collapsed": false - }, - "id": "a9d68db12329b558" + ] }, { "cell_type": "code", "execution_count": null, + "id": "d3899097cd6d0cf2", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# get chunk\n", @@ -351,15 +472,15 @@ " chunk_id=chunk.chunk_id\n", ")\n", "print(f\"got chunk: {chunk}\\n\")" - ], - "metadata": { - "collapsed": false - }, - "id": "d3899097cd6d0cf2" + ] }, { "cell_type": "code", "execution_count": null, + "id": "27e643ad8e8636ed", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# delete chunk\n", @@ -368,15 +489,15 @@ " chunk_id=chunk.chunk_id,\n", ")\n", "print(f\"deleted chunk {chunk.chunk_id} from collection {collection.collection_id}\\n\")" - ], - "metadata": { - "collapsed": false - }, - "id": "27e643ad8e8636ed" + ] }, { "cell_type": "code", "execution_count": null, + "id": "a74dd7615ec28528", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# create a new text record and a new chunk\n", @@ -390,15 +511,15 @@ " collection_id=collection.collection_id,\n", " content=\"The dog is a domesticated descendant of the wolf. Also called the domestic dog, it is derived from extinct gray wolves, and the gray wolf is the dog's closest living relative. 
The dog was the first species to be domesticated by humans.\",\n", ")" - ], - "metadata": { - "collapsed": false - }, - "id": "a74dd7615ec28528" + ] }, { "cell_type": "code", "execution_count": null, + "id": "55e9645ac41f8ca", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# list chunks\n", @@ -406,24 +527,20 @@ "for chunk in chunks:\n", " print(chunk)\n", " print(\"-\" * 50)" - ], - "metadata": { - "collapsed": false - }, - "id": "55e9645ac41f8ca" + ] }, { "cell_type": "code", "execution_count": null, + "id": "b97aaa156f586e34", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# delete collection\n", "taskingai.retrieval.delete_collection(collection_id=collection.collection_id)" - ], - "metadata": { - "collapsed": false - }, - "id": "b97aaa156f586e34" + ] } ], "metadata": { @@ -435,14 +552,14 @@ "language_info": { "codemirror_mode": { "name": "ipython", - "version": 2 + "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.6" + "pygments_lexer": "ipython3", + "version": "3.10.14" } }, "nbformat": 4, diff --git a/requirements.txt b/requirements.txt index c10b9c8..ab9828c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,5 +4,7 @@ python_dateutil>=2.5.3 setuptools>=21.0.0 httpx>=0.23.0 pydantic>=2.5.0 -wheel==0.41.2 - +wheel==0.43.0 +aiofiles==23.2.1 +aiohttp==3.9.5 +requests==2.31.0 diff --git a/taskingai/assistant/assistant.py b/taskingai/assistant/assistant.py index ea20ff7..f42f366 100644 --- a/taskingai/assistant/assistant.py +++ b/taskingai/assistant/assistant.py @@ -1,4 +1,4 @@ -from typing import Optional, List, Dict +from typing import Any, Optional, List, Dict, Union from taskingai.client.models import * from taskingai.client.apis import * @@ -11,6 +11,8 @@ "ToolType", "RetrievalRef", "RetrievalType", + "RetrievalConfig", + "RetrievalMethod", "AssistantRetrievalType", "get_assistant", "list_assistants", @@ -122,7 +124,7 @@ def create_assistant( system_prompt_template: Optional[List[str]] = None, tools: Optional[List[AssistantTool]] = None, retrievals: Optional[List[AssistantRetrieval]] = None, - retrieval_configs: Optional[RetrievalConfig] = None, + retrieval_configs: Optional[Union[RetrievalConfig, Dict[str, Any]]] = None, metadata: Optional[Dict[str, str]] = None, ) -> Assistant: """ @@ -138,6 +140,12 @@ def create_assistant( :param metadata: The assistant metadata. It can store up to 16 key-value pairs where each key's length is less than 64 and value's length is less than 512. :return: The created assistant object. """ + if retrieval_configs: + retrieval_configs = ( + retrieval_configs + if isinstance(retrieval_configs, RetrievalConfig) + else RetrievalConfig(**retrieval_configs) + ) body = AssistantCreateRequest( model_id=model_id, @@ -162,7 +170,7 @@ async def a_create_assistant( system_prompt_template: Optional[List[str]] = None, tools: Optional[List[AssistantTool]] = None, retrievals: Optional[List[AssistantRetrieval]] = None, - retrieval_configs: Optional[RetrievalConfig] = None, + retrieval_configs: Optional[Union[RetrievalConfig, Dict[str, Any]]] = None, metadata: Optional[Dict[str, str]] = None, ) -> Assistant: """ @@ -178,6 +186,12 @@ async def a_create_assistant( :param metadata: The assistant metadata. It can store up to 16 key-value pairs where each key's length is less than 64 and value's length is less than 512. :return: The created assistant object. 
""" + if retrieval_configs: + retrieval_configs = ( + retrieval_configs + if isinstance(retrieval_configs, RetrievalConfig) + else RetrievalConfig(**retrieval_configs) + ) body = AssistantCreateRequest( model_id=model_id, @@ -203,7 +217,7 @@ def update_assistant( memory: Optional[AssistantMemory] = None, tools: Optional[List[AssistantTool]] = None, retrievals: Optional[List[AssistantRetrieval]] = None, - retrieval_configs: Optional[RetrievalConfig] = None, + retrieval_configs: Optional[Union[RetrievalConfig, Dict[str, Any]]] = None, metadata: Optional[Dict[str, str]] = None, ) -> Assistant: """ @@ -221,6 +235,12 @@ def update_assistant( :return: The updated assistant object. """ + if retrieval_configs: + retrieval_configs = ( + retrieval_configs + if isinstance(retrieval_configs, RetrievalConfig) + else RetrievalConfig(**retrieval_configs) + ) body = AssistantUpdateRequest( model_id=model_id, name=name, @@ -245,7 +265,7 @@ async def a_update_assistant( memory: Optional[AssistantMemory] = None, tools: Optional[List[AssistantTool]] = None, retrievals: Optional[List[AssistantRetrieval]] = None, - retrieval_configs: Optional[RetrievalConfig] = None, + retrieval_configs: Optional[Union[RetrievalConfig, Dict[str, Any]]] = None, metadata: Optional[Dict[str, str]] = None, ) -> Assistant: """ @@ -263,6 +283,12 @@ async def a_update_assistant( :return: The updated assistant object. """ + if retrieval_configs: + retrieval_configs = ( + retrieval_configs + if isinstance(retrieval_configs, RetrievalConfig) + else RetrievalConfig(**retrieval_configs) + ) body = AssistantUpdateRequest( model_id=model_id, name=name, diff --git a/taskingai/assistant/chat.py b/taskingai/assistant/chat.py index b0e46a8..e3c1907 100644 --- a/taskingai/assistant/chat.py +++ b/taskingai/assistant/chat.py @@ -116,17 +116,20 @@ async def a_get_chat(assistant_id: str, chat_id: str) -> Chat: def create_chat( assistant_id: str, + name: str, metadata: Optional[Dict[str, str]] = None, ) -> Chat: """ Create a chat. :param assistant_id: The ID of the assistant. + :param name: The name of the chat. :param metadata: The chat metadata. It can store up to 16 key-value pairs where each key's length is less than 64 and value's length is less than 512. :return: The created chat object. """ body = ChatCreateRequest( + name=name, metadata=metadata or {}, ) response: ChatCreateResponse = api_create_chat(assistant_id=assistant_id, payload=body) @@ -135,17 +138,20 @@ def create_chat( async def a_create_chat( assistant_id: str, + name: str, metadata: Optional[Dict[str, str]] = None, ) -> Chat: """ Create a chat in async mode. :param assistant_id: The ID of the assistant. + :param name: The name of the chat. :param metadata: The chat metadata. It can store up to 16 key-value pairs where each key's length is less than 64 and value's length is less than 512. :return: The created chat object. """ body = ChatCreateRequest( + name=name, metadata=metadata or {}, ) response: ChatCreateResponse = await async_api_create_chat(assistant_id=assistant_id, payload=body) @@ -155,18 +161,21 @@ async def a_create_chat( def update_chat( assistant_id: str, chat_id: str, - metadata: Dict[str, str], + name: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, ) -> Chat: """ Update a chat. :param assistant_id: The ID of the assistant. :param chat_id: The ID of the chat. + :param name: The name of the chat. :param metadata: The assistant metadata. 
It can store up to 16 key-value pairs where each key's length is less than 64 and value's length is less than 512. :return: The updated chat object. """ body = ChatUpdateRequest( + name=name, metadata=metadata, ) response: ChatUpdateResponse = api_update_chat(assistant_id=assistant_id, chat_id=chat_id, payload=body) @@ -176,18 +185,21 @@ def update_chat( async def a_update_chat( assistant_id: str, chat_id: str, - metadata: Dict[str, str], + name: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, ) -> Chat: """ Update a chat in async mode. :param assistant_id: The ID of the assistant. :param chat_id: The ID of the chat. + :param name: The name of the chat. :param metadata: The assistant metadata. It can store up to 16 key-value pairs where each key's length is less than 64 and value's length is less than 512. :return: The updated chat object. """ body = ChatUpdateRequest( + name=name, metadata=metadata, ) response: ChatUpdateResponse = await async_api_update_chat(assistant_id=assistant_id, chat_id=chat_id, payload=body) diff --git a/taskingai/client/models/entities/__init__.py b/taskingai/client/models/entities/__init__.py index 8d3644f..68316a1 100644 --- a/taskingai/client/models/entities/__init__.py +++ b/taskingai/client/models/entities/__init__.py @@ -38,6 +38,7 @@ from .chat_memory_message import * from .chunk import * from .collection import * +from .file_id_data import * from .message import * from .message_chunk import * from .message_content import * @@ -57,3 +58,4 @@ from .text_splitter_type import * from .tool_ref import * from .tool_type import * +from .upload_file_purpose import * diff --git a/taskingai/client/models/entities/chat.py b/taskingai/client/models/entities/chat.py index b624ae6..ecfe120 100644 --- a/taskingai/client/models/entities/chat.py +++ b/taskingai/client/models/entities/chat.py @@ -21,6 +21,7 @@ class Chat(BaseModel): chat_id: str = Field(..., min_length=20, max_length=30) assistant_id: str = Field(..., min_length=20, max_length=30) + name: str = Field("", min_length=0, max_length=127) metadata: Dict[str, str] = Field({}, min_length=0, max_length=16) updated_timestamp: int = Field(..., ge=0) created_timestamp: int = Field(..., ge=0) diff --git a/taskingai/client/models/entities/file_id_data.py b/taskingai/client/models/entities/file_id_data.py new file mode 100644 index 0000000..6a5f749 --- /dev/null +++ b/taskingai/client/models/entities/file_id_data.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- + +# file_id_data.py + +""" +This script is automatically generated for TaskingAI python client +Do not modify the file manually + +Author: James Yao +Organization: TaskingAI +License: Apache 2.0 +""" + +from pydantic import BaseModel, Field + + +__all__ = ["FileIdData"] + + +class FileIdData(BaseModel): + file_id: str = Field(...) diff --git a/taskingai/client/models/entities/message.py b/taskingai/client/models/entities/message.py index ef0890a..0968969 100644 --- a/taskingai/client/models/entities/message.py +++ b/taskingai/client/models/entities/message.py @@ -12,7 +12,7 @@ """ from pydantic import BaseModel, Field -from typing import Dict +from typing import List, Dict from .message_content import MessageContent __all__ = ["Message"] @@ -25,5 +25,6 @@ class Message(BaseModel): role: str = Field(..., min_length=1, max_length=20) content: MessageContent = Field(...) 
metadata: Dict[str, str] = Field({}, min_length=0, max_length=16) + logs: List[Dict] = Field([]) updated_timestamp: int = Field(..., ge=0) created_timestamp: int = Field(..., ge=0) diff --git a/taskingai/client/models/entities/record_type.py b/taskingai/client/models/entities/record_type.py index e2a890a..9235603 100644 --- a/taskingai/client/models/entities/record_type.py +++ b/taskingai/client/models/entities/record_type.py @@ -18,3 +18,5 @@ class RecordType(str, Enum): TEXT = "text" + FILE = "file" + WEB = "web" diff --git a/taskingai/client/models/entities/upload_file_purpose.py b/taskingai/client/models/entities/upload_file_purpose.py new file mode 100644 index 0000000..1296b78 --- /dev/null +++ b/taskingai/client/models/entities/upload_file_purpose.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- + +# upload_file_purpose.py + +""" +This script is automatically generated for TaskingAI python client +Do not modify the file manually + +Author: James Yao +Organization: TaskingAI +License: Apache 2.0 +""" + +from enum import Enum + +__all__ = ["UploadFilePurpose"] + + +class UploadFilePurpose(str, Enum): + RECORD_FILE = "record_file" diff --git a/taskingai/client/models/schemas/__init__.py b/taskingai/client/models/schemas/__init__.py index 2385513..5300362 100644 --- a/taskingai/client/models/schemas/__init__.py +++ b/taskingai/client/models/schemas/__init__.py @@ -72,3 +72,4 @@ from .record_update_response import * from .text_embedding_request import * from .text_embedding_response import * +from .upload_file_response import * diff --git a/taskingai/client/models/schemas/chat_completion_request.py b/taskingai/client/models/schemas/chat_completion_request.py index dfed9e0..46094b3 100644 --- a/taskingai/client/models/schemas/chat_completion_request.py +++ b/taskingai/client/models/schemas/chat_completion_request.py @@ -13,10 +13,10 @@ from pydantic import BaseModel, Field from typing import Optional, List, Dict, Union -from ..entities.chat_completion_assistant_message import ChatCompletionAssistantMessage +from ..entities.chat_completion_function_message import ChatCompletionFunctionMessage from ..entities.chat_completion_system_message import ChatCompletionSystemMessage from ..entities.chat_completion_user_message import ChatCompletionUserMessage -from ..entities.chat_completion_function_message import ChatCompletionFunctionMessage +from ..entities.chat_completion_assistant_message import ChatCompletionAssistantMessage from ..entities.chat_completion_function import ChatCompletionFunction __all__ = ["ChatCompletionRequest"] diff --git a/taskingai/client/models/schemas/chat_create_request.py b/taskingai/client/models/schemas/chat_create_request.py index e62a659..ef19ead 100644 --- a/taskingai/client/models/schemas/chat_create_request.py +++ b/taskingai/client/models/schemas/chat_create_request.py @@ -19,4 +19,5 @@ class ChatCreateRequest(BaseModel): + name: str = Field("") metadata: Dict[str, str] = Field({}) diff --git a/taskingai/client/models/schemas/chat_update_request.py b/taskingai/client/models/schemas/chat_update_request.py index 45de54a..6a39c41 100644 --- a/taskingai/client/models/schemas/chat_update_request.py +++ b/taskingai/client/models/schemas/chat_update_request.py @@ -19,4 +19,5 @@ class ChatUpdateRequest(BaseModel): + name: Optional[str] = Field(None) metadata: Optional[Dict[str, str]] = Field(None) diff --git a/taskingai/client/models/schemas/record_create_request.py b/taskingai/client/models/schemas/record_create_request.py index 4963daa..5e5d5fb 100644 --- 
a/taskingai/client/models/schemas/record_create_request.py +++ b/taskingai/client/models/schemas/record_create_request.py @@ -12,7 +12,7 @@ """ from pydantic import BaseModel, Field -from typing import Dict +from typing import Optional, Dict from ..entities.record_type import RecordType from ..entities.text_splitter import TextSplitter @@ -21,7 +21,9 @@ class RecordCreateRequest(BaseModel): type: RecordType = Field("text") + file_id: Optional[str] = Field(None, min_length=1, max_length=256) + url: Optional[str] = Field(None, min_length=1, max_length=2048) title: str = Field("", min_length=0, max_length=256) - content: str = Field(..., min_length=1, max_length=32768) + content: Optional[str] = Field(None, min_length=1, max_length=32768) text_splitter: TextSplitter = Field(...) metadata: Dict[str, str] = Field({}, min_length=0, max_length=16) diff --git a/taskingai/client/models/schemas/record_update_request.py b/taskingai/client/models/schemas/record_update_request.py index bf28a0f..b3034e4 100644 --- a/taskingai/client/models/schemas/record_update_request.py +++ b/taskingai/client/models/schemas/record_update_request.py @@ -21,6 +21,8 @@ class RecordUpdateRequest(BaseModel): type: Optional[RecordType] = Field(None) + file_id: Optional[str] = Field(None, min_length=1, max_length=256) + url: Optional[str] = Field(None, min_length=1, max_length=2048) title: Optional[str] = Field(None, min_length=0, max_length=256) content: Optional[str] = Field(None, min_length=1, max_length=32768) text_splitter: Optional[TextSplitter] = Field(None) diff --git a/taskingai/client/models/schemas/upload_file_response.py b/taskingai/client/models/schemas/upload_file_response.py new file mode 100644 index 0000000..df23501 --- /dev/null +++ b/taskingai/client/models/schemas/upload_file_response.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- + +# upload_file_response.py + +""" +This script is automatically generated for TaskingAI python client +Do not modify the file manually + +Author: James Yao +Organization: TaskingAI +License: Apache 2.0 +""" + +from pydantic import BaseModel, Field +from ..entities.file_id_data import FileIdData + +__all__ = ["UploadFileResponse"] + + +class UploadFileResponse(BaseModel): + status: str = Field("success") + data: FileIdData = Field(...) 
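Note (not part of the patch): the schemas above imply request and response payloads roughly as sketched below. Field names follow RecordCreateRequest and UploadFileResponse from this diff; the concrete values are placeholders for illustration only.

# Illustrative payloads implied by the new schemas (values are placeholders).
web_record_create = {
    "type": "web",                        # RecordType.WEB
    "title": "Machine learning",
    "url": "https://www.tasking.ai",      # https-only, per the validation added in retrieval/record.py
    "text_splitter": {"type": "token", "chunk_size": 200, "chunk_overlap": 20},
    "metadata": {},
}

file_record_create = {
    "type": "file",                       # RecordType.FILE
    "title": "Machine learning",
    "file_id": "YOUR_FILE_ID",            # obtained from the upload endpoint
    "text_splitter": {"type": "token", "chunk_size": 200, "chunk_overlap": 20},
    "metadata": {},
}

upload_file_response = {
    "status": "success",
    "data": {"file_id": "YOUR_FILE_ID"},  # parsed into FileIdData by UploadFileResponse
}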
diff --git a/taskingai/file/__init__.py b/taskingai/file/__init__.py new file mode 100644 index 0000000..6dbc219 --- /dev/null +++ b/taskingai/file/__init__.py @@ -0,0 +1 @@ +from .file import * diff --git a/taskingai/file/api.py b/taskingai/file/api.py new file mode 100644 index 0000000..765c107 --- /dev/null +++ b/taskingai/file/api.py @@ -0,0 +1,58 @@ +from typing import Union + +import aiohttp +import requests + +from taskingai.client.models import UploadFilePurpose + +from .utils import ( + FileType, + extract_file, + extract_file_async, + get_file_response, + get_file_response_async, +) + +__all__ = ["upload_file_api", "a_upload_file_api"] + + +def upload_file_api( + file: FileType, + purpose: Union[UploadFilePurpose, str], + **kwargs, +): + from ..config import Config + _url = f"{Config.HOST}/v1/files" + _header = {"Accept": "application/json", "Authorization": f"Bearer {Config.API_KEY}"} + _content_type = "application/octet-stream" + + file_bytes, file_name = extract_file(file) + + purpose = purpose if isinstance(purpose, UploadFilePurpose) else UploadFilePurpose(purpose) + + files = {"file": (file_name, file_bytes, _content_type)} + data = {"purpose": purpose.value} + + response = requests.post(_url, headers=_header, files=files, data=data, **kwargs) + return get_file_response(response) + + +async def a_upload_file_api( + file: FileType, + purpose: Union[UploadFilePurpose, str], + **kwargs, +): + from ..config import Config + _url = f"{Config.HOST}/v1/files" + _header = {"Accept": "application/json", "Authorization": f"Bearer {Config.API_KEY}"} + _content_type = "application/octet-stream" + + file_bytes, file_name = await extract_file_async(file) + + purpose = purpose if isinstance(purpose, UploadFilePurpose) else UploadFilePurpose(purpose) + + data = aiohttp.FormData() + data.add_field("file", file_bytes, filename=file_name, content_type=_content_type) + data.add_field("purpose", purpose) + async with aiohttp.ClientSession(headers=_header) as session, session.post(_url, data=data, **kwargs) as response: + return await get_file_response_async(response) diff --git a/taskingai/file/file.py b/taskingai/file/file.py new file mode 100644 index 0000000..28d3aa0 --- /dev/null +++ b/taskingai/file/file.py @@ -0,0 +1,37 @@ +from typing import Union + +from taskingai.client.models import FileIdData, UploadFilePurpose + +from .utils import FileType +from .api import upload_file_api, a_upload_file_api + +__all__ = ["upload_file", "a_upload_file"] + + +def upload_file( + file: FileType, + purpose: Union[UploadFilePurpose, str] = UploadFilePurpose.RECORD_FILE, +) -> FileIdData: + """ + Upload a file. + + :param file: The file bytes or file path. + :param purpose: The purpose of the file. + :return: The uploaded file info. + """ + return upload_file_api(file=file, purpose=purpose) + + +async def a_upload_file( + file: FileType, + purpose: Union[UploadFilePurpose, str] = UploadFilePurpose.RECORD_FILE, +) -> FileIdData: + """ + Upload a file. + + :param file: The file bytes or file path. + :param purpose: The purpose of the file. + :return: The uploaded file info. 
+ """ + + return await a_upload_file_api(file=file, purpose=purpose) diff --git a/taskingai/file/utils.py b/taskingai/file/utils.py new file mode 100644 index 0000000..0899ba6 --- /dev/null +++ b/taskingai/file/utils.py @@ -0,0 +1,59 @@ +from io import IOBase +from typing import IO, Union + +import aiofiles + +from taskingai.client.models import FileIdData + +__all__ = [ + "FileType", + "get_file_response", + "get_file_response_async", + "extract_file", + "extract_file_async", +] + + +FileType = Union[IO, str] + + +def get_file_response(response): + response_json = response.json() + data = response_json.get("data") + if data: + return FileIdData(**data) + return response_json + + +async def get_file_response_async(response): + response_json = await response.json() + data = response_json.get("data") + if data: + return FileIdData(**data) + return response_json + + +def extract_file(file: FileType): + if isinstance(file, str): + with open(file, "rb") as f: + file_bytes = f.read() + file_name = f.name + elif isinstance(file, IOBase): + file_bytes = file.read() + file_name = file.name + else: + raise Exception("file type error") + return file_bytes, file_name + + +async def extract_file_async(file: FileType): + if isinstance(file, str): + async with aiofiles.open(file, "rb") as f: + file_bytes = await f.read() + file_name = f.name + elif isinstance(file, IOBase): + file_bytes = file.read() + file_name = file.name + else: + raise Exception("file type error") + return file_bytes, file_name diff --git a/taskingai/retrieval/record.py b/taskingai/retrieval/record.py index 86d0a30..5ff45c2 100644 --- a/taskingai/retrieval/record.py +++ b/taskingai/retrieval/record.py @@ -1,10 +1,11 @@ -from typing import Optional, List, Dict +from typing import Any, Dict, List, Optional, Union -from taskingai.client.models import * from taskingai.client.apis import * +from taskingai.client.models import * __all__ = [ "Record", + "RecordType", "get_record", "list_records", "create_record", @@ -18,6 +19,25 @@ ] +def _validate_record_type( + type: Union[RecordType, str], + content: Optional[str] = None, + file_id: Optional[str] = None, + url: Optional[str] = None, +): + type = type if isinstance(type, RecordType) else RecordType(type) + if type == RecordType.TEXT and not content: + raise ValueError("Content must be provided when type is 'text'.") + if type == RecordType.FILE and not file_id: + raise ValueError("File ID must be provided when type is 'file'.") + if type == RecordType.WEB: + if not url: + raise ValueError("URL must be provided when type is 'web'.") + if not url.startswith("https://"): + raise ValueError("URL only supports https.") + return type + + def list_records( collection_id: str, order: str = "desc", @@ -113,24 +133,35 @@ async def a_get_record(collection_id: str, record_id: str) -> Record: def create_record( collection_id: str, - content: str, - text_splitter: TextSplitter, + title: str, + type: Union[RecordType, str], + text_splitter: Union[TextSplitter, Dict[str, Any]], + content: Optional[str] = None, + file_id: Optional[str] = None, + url: Optional[str] = None, metadata: Optional[Dict[str, str]] = None, ) -> Record: """ Create a record. :param collection_id: The ID of the collection. + :param type: The type of the record. It can be "text", "web" or "file". + :param title: The title of the record. :param content: The content of the record. :param text_splitter: The text splitter to split records into chunks. :param metadata: The collection metadata. 
It can store up to 16 key-value pairs where each key's length is less than 64 and value's length is less than 512. :return: The created record object. """ + type = _validate_record_type(type, content, file_id, url) + text_splitter = text_splitter if isinstance(text_splitter, TextSplitter) else TextSplitter(**text_splitter) body = RecordCreateRequest( - type="text", - content=content, + title=title, + type=type, text_splitter=text_splitter, + content=content, + file_id=file_id, + url=url, metadata=metadata or {}, ) response: RecordCreateResponse = api_create_record(collection_id=collection_id, payload=body) @@ -139,24 +170,36 @@ def create_record( async def a_create_record( collection_id: str, - content: str, - text_splitter: TextSplitter, + title: str, + type: Union[RecordType, str], + text_splitter: Union[TextSplitter, Dict[str, Any]], + content: Optional[str] = None, + file_id: Optional[str] = None, + url: Optional[str] = None, metadata: Optional[Dict[str, str]] = None, ) -> Record: """ Create a record in async mode. :param collection_id: The ID of the collection. + :param type: The type of the record. It can be "text", "web" or "file". + :param title: The title of the record. :param content: The content of the record. :param text_splitter: The text splitter to split records into chunks. :param metadata: The collection metadata. It can store up to 16 key-value pairs where each key's length is less than 64 and value's length is less than 512. :return: The created record object. """ + type = _validate_record_type(type, content, file_id, url) + text_splitter = text_splitter if isinstance(text_splitter, TextSplitter) else TextSplitter(**text_splitter) + body = RecordCreateRequest( - type="text", - content=content, + title=title, + type=type, text_splitter=text_splitter, + content=content, + file_id=file_id, + url=url, metadata=metadata or {}, ) response: RecordCreateResponse = await async_api_create_record(collection_id=collection_id, payload=body) @@ -164,10 +207,14 @@ async def a_create_record( def update_record( - collection_id: str, record_id: str, + collection_id: str, + title: Optional[str] = None, + type: Optional[Union[RecordType, str]] = None, + text_splitter: Optional[Union[TextSplitter, Dict[str, Any]]] = None, content: Optional[str] = None, - text_splitter: Optional[TextSplitter] = None, + file_id: Optional[str] = None, + url: Optional[str] = None, metadata: Optional[Dict[str, str]] = None, ) -> Record: """ @@ -175,6 +222,7 @@ def update_record( :param collection_id: The ID of the collection. :param record_id: The ID of the record. + :param type: The type of the record. It can be "text", "web" or "file". :param content: The content of the record. :param text_splitter: The text splitter to split records into chunks. :param metadata: The collection metadata. It can store up to 16 key-value pairs where each key's length is less @@ -182,24 +230,33 @@ def update_record( :return: The collection object. 
""" - type = None - if content and text_splitter: - type = "text" + if type: + type = type if isinstance(type, RecordType) else RecordType(type) + if text_splitter: + text_splitter = text_splitter if isinstance(text_splitter, TextSplitter) else TextSplitter(**text_splitter) + body = RecordUpdateRequest( + title=title, type=type, - content=content, text_splitter=text_splitter, - metadata=metadata, + content=content, + file_id=file_id, + url=url, + metadata=metadata or {}, ) response: RecordUpdateResponse = api_update_record(collection_id=collection_id, record_id=record_id, payload=body) return response.data async def a_update_record( - collection_id: str, record_id: str, + collection_id: str, + title: Optional[str] = None, + type: Optional[Union[RecordType, str]] = None, + text_splitter: Optional[Union[TextSplitter, Dict[str, Any]]] = None, content: Optional[str] = None, - text_splitter: Optional[TextSplitter] = None, + file_id: Optional[str] = None, + url: Optional[str] = None, metadata: Optional[Dict[str, str]] = None, ) -> Record: """ @@ -207,21 +264,26 @@ async def a_update_record( :param collection_id: The ID of the collection. :param record_id: The ID of the record. + :param type: The type of the record. It can be "text", "web" or "file". :param content: The content of the record. :param text_splitter: The text splitter to split records into chunks. :param metadata: The collection metadata. It can store up to 16 key-value pairs where each key's length is less than 64 and value's length is less than 512. :return: The collection object. """ + if type: + type = type if isinstance(type, RecordType) else RecordType(type) + if text_splitter: + text_splitter = text_splitter if isinstance(text_splitter, TextSplitter) else TextSplitter(**text_splitter) - type = None - if content and text_splitter: - type = "text" body = RecordUpdateRequest( + title=title, type=type, - content=content, text_splitter=text_splitter, - metadata=metadata, + content=content, + file_id=file_id, + url=url, + metadata=metadata or {}, ) response: RecordUpdateResponse = await async_api_update_record( collection_id=collection_id, record_id=record_id, payload=body diff --git a/taskingai/tool/action.py b/taskingai/tool/action.py index 861a281..9303513 100644 --- a/taskingai/tool/action.py +++ b/taskingai/tool/action.py @@ -1,4 +1,4 @@ -from typing import Optional, List, Dict +from typing import Any, Optional, List, Dict, Union from taskingai.client.models import * from taskingai.client.apis import * @@ -105,7 +105,7 @@ async def a_get_action(action_id: str) -> Action: def bulk_create_actions( openapi_schema: Dict, - authentication: Optional[ActionAuthentication] = None, + authentication: Optional[Union[ActionAuthentication, Dict[str, Any]]] = None, ) -> List[Action]: """ Create actions from an OpenAPI schema. @@ -115,10 +115,12 @@ def bulk_create_actions( :return: The created action object. 
""" - if authentication is None: - authentication = ActionAuthentication( - type=ActionAuthenticationType.NONE, - ) + authentication = ( + authentication + if isinstance(authentication, ActionAuthentication) + else ActionAuthentication(**(authentication or ActionAuthentication(type=ActionAuthenticationType.NONE))) + ) + body = ActionBulkCreateRequest( openapi_schema=openapi_schema, authentication=authentication, @@ -129,7 +131,7 @@ def bulk_create_actions( async def a_bulk_create_actions( openapi_schema: Dict, - authentication: Optional[ActionAuthentication] = None, + authentication: Optional[Union[ActionAuthentication, Dict[str, Any]]] = None, ) -> List[Action]: """ Create actions from an OpenAPI schema in async mode. @@ -139,10 +141,12 @@ async def a_bulk_create_actions( :return: The created action object. """ - if authentication is None: - authentication = ActionAuthentication( - type=ActionAuthenticationType.NONE, - ) + authentication = ( + authentication + if isinstance(authentication, ActionAuthentication) + else ActionAuthentication(**(authentication or ActionAuthentication(type=ActionAuthenticationType.NONE))) + ) + body = ActionBulkCreateRequest( openapi_schema=openapi_schema, authentication=authentication, @@ -154,7 +158,7 @@ async def a_bulk_create_actions( def update_action( action_id: str, openapi_schema: Optional[Dict] = None, - authentication: Optional[ActionAuthentication] = None, + authentication: Optional[Union[ActionAuthentication, Dict[str, Any]]] = None, ) -> Action: """ Update an action. @@ -164,6 +168,12 @@ def update_action( :param authentication: The action API authentication. :return: The updated action object. """ + if authentication: + authentication = ( + authentication + if isinstance(authentication, ActionAuthentication) + else ActionAuthentication(**authentication) + ) body = ActionUpdateRequest( openapi_schema=openapi_schema, authentication=authentication, @@ -175,7 +185,7 @@ def update_action( async def a_update_action( action_id: str, openapi_schema: Optional[Dict] = None, - authentication: Optional[ActionAuthentication] = None, + authentication: Optional[Union[ActionAuthentication, Dict[str, Any]]] = None, ) -> Action: """ Update an action in async mode. @@ -185,6 +195,12 @@ async def a_update_action( :param authentication: The action API authentication. :return: The updated action object. 
""" + if authentication: + authentication = ( + authentication + if isinstance(authentication, ActionAuthentication) + else ActionAuthentication(**authentication) + ) body = ActionUpdateRequest( openapi_schema=openapi_schema, authentication=authentication, diff --git a/test/common/utils.py b/test/common/utils.py index 5b4815a..94a9a60 100644 --- a/test/common/utils.py +++ b/test/common/utils.py @@ -107,10 +107,17 @@ def assume_collection_result(create_dict: dict, res_dict: dict): def assume_record_result(create_record_data: dict, res_dict: dict): - for key in create_record_data: - if key == "text_splitter": + for key, value in create_record_data.items(): + if key in ["text_splitter"]: continue - pytest.assume(res_dict[key] == create_record_data[key]) + elif key in ["url"]: + assert create_record_data[key] in res_dict.get("content") + elif key == "file_id": + assert create_record_data[key] in res_dict.get("content") + assert int(res_dict.get("content").split('\"file_size\":')[-1].strip("}").strip()) > 0 + else: + pytest.assume(res_dict[key] == create_record_data[key]) + pytest.assume(res_dict["status"] == "ready") @@ -131,6 +138,8 @@ def assume_assistant_result(assistant_dict: dict, res: dict): elif key in ["memory", "tool", "retrievals"]: continue else: + if key == 'retrieval_configs': + res[key] = vars(res[key]) pytest.assume(res[key] == assistant_dict[key]) diff --git a/test/config.py b/test/config.py index 96b51ad..c2271fa 100644 --- a/test/config.py +++ b/test/config.py @@ -7,13 +7,17 @@ class Config: - chat_completion_model_id = os.environ.get("CHAT_COMPLETION_MODEL_ID") - if not chat_completion_model_id: - raise ValueError("chat_completion_model_id is not defined") + openai_chat_completion_model_id = os.environ.get("OPENAI_CHAT_COMPLETION_MODEL_ID") + if not openai_chat_completion_model_id: + raise ValueError("openai_chat_completion_model_id is not defined") - text_embedding_model_id = os.environ.get("TEXT_EMBEDDING_MODEL_ID") - if not chat_completion_model_id: - raise ValueError("chat_completion_model_id is not defined") + openai_text_embedding_model_id = os.environ.get("OPENAI_TEXT_EMBEDDING_MODEL_ID") + if not openai_chat_completion_model_id: + raise ValueError("openai_chat_completion_model_id is not defined") + + anthropic_chat_completion_model_id = os.environ.get("ANTHROPIC_CHAT_COMPLETION_MODEL_ID") + if not openai_chat_completion_model_id: + raise ValueError("anthropic_chat_completion_model_id is not defined") taskingai_host = os.environ.get("TASKINGAI_HOST") if not taskingai_host: diff --git a/test/files/test.docx b/test/files/test.docx new file mode 100644 index 0000000000000000000000000000000000000000..3e78daab4229e4358c5ec0c4237af98c6409fd12 GIT binary patch literal 7725 zcmc&(WmHt{+8(+aBm@BgVWd++y1Nl+Y006ayThToyQMp&q`OPHq-&7;IPd$Ov&3`! 
z{q0$^*WS-w*R$3g&mGsTAPobH3jhER0mf;G>e8xe=d+J*ywCsu3IG?NYind-$H?%n zI}`x(@x#-rG)h()Mi4dN=nhZ#aLIx;NW#$-EAvg{HB5)hCr8t5Uzy44BNCA@eYJ zZS!s6>is+aWqUT*hO5oKmzV8Nr61uV)#2`794^Hg{UIEFFoQTUn1KjyE(e*l60 zBzx-c5ON0-ChUr9`pb>wJQqyhT?6jOiy}yr#k|)XTA#+=!gDM)KzJt;=>&R{XAqjc ztzYU}l^R^Hq_EE1=(=5{w9Qfc19l~^Ui7G57@1V9MB!|eaoQ^`)rOu$^$?FO;`OXv zjr)2ngFkT^m~RBaBnjTVMDEJgT%KpgYboGYY6s z*|HDa^p&A}Tf2Chz;f=y!;5@JhjNUP-{aQM%oQ?cIxT6BoD@d=BY|~pL!ltA|0@4m zU9qM|UDN#KM<{1=!*xO`Hiqu7ndJqx5G@^PibbLli{5w8M5}#WuYD?28r%|CvVU;4 z1}m7Y7A6$OI=Bh_IG_T-g`_zVl0>+)cf8yf=eoYeOfl(_26&1m*nCzPB77Chx~v~9 zM?;%!3yYAeyQw!ms>i)TN0{6ul^#7E!&b=hEZ;}+ZCaR4HHZ;P{hD*KqaUvHn!nl+ zu{3o|#v`~yY(?1|!JAI|Lh<9Ke0DXf41`g(NffzW;HEqksr9^$(qt&HZ^C^F5Rx57 zm|wiXbA1u6kP@&29zYl` zCb50v-v7e9559F!FtouiPRxjG*z^PBMpfAMQxuYUP5(EbzW;s>JP%vt3q5xLhDZRk z+!kL>pmul{*eF>^mbtt|W!iOqY06+&10Kj-W9)CHMoxQV#k=>u% zX;Xd3YMleK^+*}9k-E}1!}PPmGjOOLOwLD4>K0{O`ieXW?;Lv95x5W?m;KD0+?|GR z(C!)f3$3DoxKsl%C9t8-5F1;A&W1p2tSO{pW#29Ao>>H`s2qRf4;1I3DUQp4k-HC82l?u=(<8lWnk?;gwLoHpn$Z&=?yM`D7hKyUD`dr_hZheKeAArY6~?xv9t2D$YnD^9QB| zV6X)qFGjJ{<8wjd4BRJYfW-MsP(ZK(B!MUn%lIxRh7=SYlhL%)J(v$pIAN zBe8)B>4y?}DpwKxSO#Y=w4@tfzDPLF(k~_4f_B6qA4Pua+m);)&AahpW>at526(x9M|@q)>I^uvg`#6z?CatPVgA!EkJ_X1-iI@dQIR)?Qt*pswE$n66L*A>?V1oqNcW=`fBne? zvvKgH7x8G)MwzjK*VB;=??WvYRA6@QtIC$ckc4ypMo#OXy%>m?qSY;vpgoU8*rJPM z?)V!u|Egnitz?U{{3Ru;%WwDZN$B3u9E=BXYcHVF%@#SrH5UkA=VId}GLsc{$wa}) z9djoJ)EVzE@j!9Lh}>x)yaUd>Zsb7E6DeIC-J{;-z1wipy=E7?>RVp}!LupI;3VKn zq|q6#ekd-_b4=|fbP8SFfKPGAL_XYWhc(V{Na24ujnre)V3IUP0M8{unQ~iXFpxF@ zc9@7AdIwK25*on)<4MMi5kN{T83bcYjje5!O3ZB>+gDy+3#SB`=`S!0O}TKDe1FvK zN`+#OYdJf`rzsD~LJdb^VjHYO-ts|vO|bSYhJ=lxoS9Rd*(cHwS}O!J$~C=YB&&jPUiZ8XS0dR^MXCuK6m%l(v*9++`-746)|++6 z(T9_nH~gQL56*Zv&XI5Nbtz!JLz6NQXAZ}@PHP@evQ!?^BeELVt9f!wR?1_jOhlx= zF<7WNheI98u8Zd>zv~an?tWWOy_?jqQ4r zBP6@IZ%SNp8FcP`D+v~jOv29|%YmlHM2h|Im}hKlWv}u<-{Ox2QB9<*Stk>w7=+7z zv^6qG6!^*((SKhV9^kKw(2bA zgmIwt-go~M=Ec~^WSOxlY%f?(EiWvHxLUQ|0Ka<$HWxGfg^4g{Zr>^&zomDo17wX=UyrI zY$Ka&soTVnJIB+iXRM&q&}}7Hj_yI1N+gxA6730|Fn>k(3T*=z^dJ*WD(@80h#CUx z-}{M>OzL=R&@@r_EOBHrn_b7qX8VoDHfL{05s-+8MT5Q|%l1^4ny`ecRub3vE`Th? zgG^4-MS6+}w$0dP!pgCXW}bVW%Fx`ayWViz$aEMs7#61rWmep>qJ*y5xFQrbF*kfR z#bBEzA?BXF+W7%*hqB_G-EeQ~Muzt~qmKoks6#x`rhCsCO=^-K7bg7SbaZspu%gt9hYBR3DV+3N>V~s6iYnh+Ho@^DzYw$#g559*^l3&Vqq&6Fxx zj50abA;OzPp8C5QC`%w-=e6%7TU|ou7Hpnh&%nli}urST^_gE2>M=wdxe1@eOlw2 zEkSaVazvMK#ZUM)I;Ew0ccm44SJ+P63|Ii#ELg<x>1k1_Nh1Is&3(E;jOt{vYv)YUG*IHzD0EO}jBFHd^Vj_To_3x*u zW~sWxwk#=yT1qUbj)n1dQNPU(@0ryx3J0~~31MPvrv=BjIfUkX$s{fPbb*&AAy|8) zRF|2b5>7z-*(J^%`@)wmN$Dmc!*`m*`Sgl!k?N#^H~^w#^8jLRQy36z7Fs*j2w$+*FEWHc)S!jnl1*Ddynd2hZVAH5LetX-JuLa(B(zm?=9rCDp@m}c! 
z?V0d$o}gJ$t z6Vyy?fLclRz`7kyth@Y1) z$t|iTxsu0JBhaIJk%n@J{<)er?^brgHbzK(v^6yV{^6Rgg;m@;=jx?0QLj!p!Ovn5 z^M^fIRxKgcgGIsej1DR+=4&?(XU|4YSNG%4@FsEGX>!-JV?MGX+0_WNr7whcCK@BH zzuAWILGbqd@3t|vwzmKOvW+r=cev^^;V|26RCHl!yH~;5rkyd6Tp~&v50v)>6EVuQ zQ`HeN4aYPgazc7OoVwq(7S$)mUbDi*0}9=wm8x^3g!KiKjZ5pSG`9s(pl{`N8lph# zdU1+j$O`q@gH-t$rE*ItX2AloK?_*2`Q*;2Z z770W%WK<8kRn4mLyvQSD$V~inNT#l^8Wlv~&ezt=ztq;kch&uEi30gY1h2dkDdtoj zs5iiBChVTI)I|5`u_=W07PWt@iANs!fPe9Xe{%sw_VypGO#a9Qd{&V)#}OoGg}n3Q z+Ra>IyAu$q%gv2>QG#5k)75F{SV_pjQ#CmF3LZ;Lr0C_EpmgZg+SgDgT*t_lSIU)Z z4);Ds9_+P(yC1jQ(;(){kh!H<7lO*32nkU*$FWl-9Ck#ApS<%*R#TvB=$p~$t&X08 z(x7QT5j-qt3Y{1Kq6Rz<&Ti&Is?fm3S{1nTL#=>UhL;Q4Bm+O=#S&AWU@%e%PgSdG z02i@!h*oSIyzbVQ;04C)?7q#Rcj4WO+$*36cnBp3D7k#Do!b@};X&yy9^+HzU$TSN zm`7~cksKHkMg-E^%92p}5*gh|9;?&~LkH8ZI`l^R3ku=f>Nuv-%rj_G7>k;7PfKL zM%(`!Gk*I%qa)C0?C{(k=O>?rtyhi~`G-_vYs-)25 zm}nBoV*yJ@KZG@CF}5)V;DD3p?o-qpgEG@kHme#P~$#*us6c+T*rCiPtm zHoG2kvNHA`E6rDF=HJSY1*}KZ@A#Z!_{pEzSLE33y~RJp_T$XmLUB)<(bXA{=EgT; znd@8cg92w&TD(#Jgq1C*i|;z4_cJ53V+X!8#2inncmo;6^+hjSbMXMG`P8hg2Q7Oo zOQ)K-g+oB;)a*Q}W?97r@sDtdmxP-z2@>w~?EMJ@Se~9eBZ5 z`oqU|F8(pv6aU+7|0e!_Wk&xz1c)99d1QNgj`&0qnq-TM=|T(fv6>k!pelFIF={0x zdW_s%eML*jj4-%7q6Z;2EW#WMZLJ1vJ8tIHUUZV)nQD0!4)iPP``hW|Lng`A`;`8) ztshHGGR0k?&?4{F7K`CscX1391Ec%vb(A8$Y^fDRmqap-+Yc?x!kpsEf1-h_7fytt zWq9_mZdp{MTF<(-n4R|q@i3C6xf4Nt*NhShB8A=T)fQeJM3~8vg#2@2m9;aQ4NsrY z0o{HV^wvK)t;4gc>kW;xz5H!P74ZJ`Av)^kpT21uWd^6fbMU2SwD3u!ciU~5+b6iE zx>MIW>Cb@7KPk}&}dH9mkS`) z7=1NBqU%SfKcQsq!k80A@*LsJ)K-xjFFJUoXzX@K5>9=3o2lqf3ALR&7JAN|oQt%x zh*&rvdR^g?xSWeI(C2nx{*}5~?M76CeBQOkO~-qUkM$Xw5%M~&__aM%)!ThlR?_=x~ZlFNf-8v2v z5B)4J7uZ{)rgblP+%#w)#5u^CH`PWM`VJ|GC!7^(98!KxDKth{R6=j%^f0_z{P4O3 ztc5v-fmMVnG_;w4i>P414K`WDUShw#Hav2|6M67M@3PpN8f!V*<7p`TR%iiLlLH?0 zmNC+e9IuU39=FX9dqU=;VRiYlBQkUF()=d{7~UF6Sahm=F^AE}obWe`3qZdwNSFvN z4+PY+=zLRe3nH!2mnS|bEWfSrL_GW{j}Wr6`h6i9|8VTeEp7oS`qw;=6#$O{8xSOJN#*6{uO?X{}22R nar*b2p2n$PcVZ$0{I95`APx6ORRI8~kH4EoHz9uhuebjLp=?GD literal 0 HcmV?d00001 diff --git a/test/files/test.html b/test/files/test.html new file mode 100644 index 0000000..a90fdf1 --- /dev/null +++ b/test/files/test.html @@ -0,0 +1,11 @@ + + + + + + TaskingAI + + +

The open source platform for AI-native application development.

+
+
diff --git a/test/files/test.md b/test/files/test.md
new file mode 100644
index 0000000..fcc4cf5
--- /dev/null
+++ b/test/files/test.md
@@ -0,0 +1,2 @@
+# TaskingAI
+The open source platform for AI-native application development.
diff --git a/test/files/test.pdf b/test/files/test.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..5030d9122d0f9d21837834d51b38e6beaf320d02
GIT binary patch
literal 23689
zcmagF19+t0@-7-pJh78WI<{@www;M>O>7$zTNB&P#I|iaH#5Jz@80K}|2?yv*eS1A})O!0=
zA4S^Ni%THJOalyZ0FnNB!s7DHkk5|kI%3UYnu84o4b<5ds#FvRl{BNCiP$bGLkj&2K>D+}bVQu%n@jj4aS;HZj1_t|mjKES-3 z&dJ+&-%M-fB+jh`l#!NmzX0Y$_Bn?%LOpGNcdjS6ig%PEyhO*%ZF@lU|FV@VQ{)m3 z>USB+fAqK}@edOtiv_9&L;&gV$L zA`wOSvleN2>)Z^?#u0z{*+CDFLq%ETT{{Qp;D3(*gxG}G#7(~{Lr7AV$~LCS^Yy2P zBq|`0y!0)I!|?;c9?O2z#2pWt_GkZm7u^mwLhd~6C$Fu*`u5J}>Axth#zlZjGmFDx zR-+t0Z?%nHO~M!=3&;>+i~LET*)KXI6Tl7mf~eFN#ugF7f2dEqy|cZOav<2W9>(^g z8dV?OA6oJeW2DNc=CRoI3;963OV|Sf=`?3wtJ^bmKF@Ba-!;NCM6BtN5xx(vg38C7 zz?06d_K;6O%zCgr@AU)0#567X^+MRnlhXpqwAXe+iJY3n*Bl*@Vj%?JHPAy`sx@xd zQ3S;+v?~gpAE07oQ#*4{D6&?AN<<^sQ&B&xZSHuej^L=?;L2D3ymJN8u_qdu={E|s z+7PX*h0?L7_C?Ai3Yxy|SoMMuh7f~c^7pma^tRMq<7{IFAVU&7;cP<%cu|phW1Xb* zm0P1%rBHkYU24D?qnM+X4NW#ELA{#(v95QQ746^-D%d9pe@q=XJF;xOZ^eo?o33`W z|D!{XN4ShqlX_d^8WYS91wRUZmY|NXxGz$%M(R=g%G16+iY)0;M!%>@v0kyF)UxQ_ zwBKSn{2K;|b4>JZv~-jqoXAu+ZIY z&sai333%u)r5Png{Z-R3Ow$O`h~o?s5eESW1qY41B<4gc;h8eb`3SQCX5=(4thWFY z0ZLeG(TE)pd4`bzi9CZ-=F(n`ma01yJ5cL*KoWuo0tteW+75og=r%E=pevT3T7#sV z3w8gki?{c;YGbGfk#$
^zTHX5~@4FB}lanQGNG~Kcq#z_Aq{Vey6WW+Q^$DgOhCY}+ufK#=@P&q5 z#ih1qC!wdP6y-jJX-bop9*c9jj71_CvTc_X-rkj}t0!l)rYnvjmG=Ha>)6H#gHOk3 z1`r@2PDRTWKAJGBX3n#CZgs{&rXg)AJzXe7-S%3o*Wg(cH+y2IP637fgVEq!v+p%I zTJn2Y6C%KGRI4Q?{oK=o5#wVT!ADgsMm+7T#<13=OhXS*i5 z|F$Z){7fa+C+t13E{~UEE(2B;CVba-bdTcqX7QEz_gK&We#38*e-D>l#^=7g?AF*YWWF_2t`?(^k9PZcz_wQMSnpKy@c-;io&GovkhR>B`pB zGc8xcd_XS@e--5$jH&!8n{+asCzlp#x`|?m5MDGMRI+|w99g!6Zg0H6x2fqf2@Q`( z=O50I_aJ8kR*?-_l;3|W`!$4EMW%t0-3vIj3xJ6353INV!Lx4Vm1;?HqcXgARHvvvof^h&M3^0Laq0*gTZJuNrpyX)%MM`Gc-C7nJRW zzeGdl^~^`z=k-8JfiluMU*or?^`-zkb{esM=B|62Y(X0{jS%t+?qv&75hYgPoGESAyCrH#pVO} zn>xc}5X8{*5C19OH@eekSSmYl!tkekHI%E>T^|MEu7-#bzYOvgB2y8)m%_ICwnSxp zmtPbxVHJTC6Vl-X@1#e0ffP{v4Tnf^1X)p|E4sr zF1U5KylXjBHWL=XHp0u@&o-d*SiObaTHZ+h?wQ}u{OyrOn*P9A)@XyrSM#>Cog5aJ zvt7~88UBDGi*`XfUmf-tVY1)x{XR5kjiBj=%#V)MIdn`4uQjL4EA**WE0EwzK{0qV z_1tsw8yZq90?P$4W22#<_+fHCs-gL%-;u8-5WRyhS;0`7zg(?BZV0grN%Yy*wDmf; zabnv6apMx>);#MOYdh^QP2VN&&nTZL;)H}dP%b?LBXo#f{y83zTOk=xWso{ zarQ6h0_FD?Y4gNxg*Dt`*MJc-wP~-1`1U6Ik7C`g#Yy~4aHNv+%&tO9gt1R>RM+A>IMW{0x1a_r-)bH1_K*qC24% z#IHHJW3iagz-=$#kFXVSZ}6iwD88Ee2!%2RLCJ?Ac2MXs_x_H~Xgr&Juz>={11 z{_Brju(Lu?#~>b3p8U!T8+9@bTbfsWeZPcQ?=jmru3-A{Nr}2{NP`;JNQ(Rx5p8Z4 z#&cWbCoFb;?;y7}Kc^4QCkX1^2i6|xp7FqXm}r_;T!$%sXEP%7uC&@jElD2f4!@#j zE_RW30bYb#v!}S@mt*YNBJ_@cdTM-0x(D1koxqNZF2yv0na2+Q);f7imnpG{BJ^o$ zNBH(+nVE&d@BShozRDex`+a!fohox>b1>3}!~INddczv_m|0!YYG<@t-gv|tq_hg8 z)H!=id8)Q=FFgb~-!^QZX`yUF8pAx4@Je}keeB<(Z}6y2Ni_796v}&@uQ61%CT0WK ziH=YmvERO#@ZjShQRfng^mEiGYf;06G=@He@S^yXVkvT0J}0_2ySG2vojpIv{{FDc z@z`=ugMrqj29uJA*O&%+ZAMviB;IHBzrbjYU6pcIqCPCi`gin$xvvT;MBEF8$)f+%Y=qQj9nCs-{`0UAXr1WjMIj!Po)R!DcMpZR1t= z=3$(d^&>ct8D_!o-A+MagighCx|6m z5l9nQT$53mkG{F`iI-a7Htrlj+hI2{9W%U>)&+P)JNY`tI(56-xrI8Vqc>l`ypT3@ zCs~g)Z?&IQfyJmoy7rr55Eul|T$V|wP~8JyuGR=F`(1=EkSqI<-G$i5a8Pv+zZu}n zI=8b?oTGjSdg^$pzZOZz6)0mW*IGncgdd~LNtR}p=qP*rge&8mO)kNc;VdnPuB}6}ZqQ#N#2QIG8NV9_@&&4-4t@0Hfp_}dW zGps?@bfKQf5e}`01S5YYS&vzfJ_bsXfm7r3Onv~peIOQlv%VoeF(ua2nuwKz&8?Ok z0~uq1zqNyINE;zESH;qr!x!Uw^2&Nkefb6IpcCP>UZw#);&IV~+SEJRou}eU+95`x+Z0{5wWQr-JKFQj z$ZV@SFg$0gQo|MH_F*}YTtU{k1R{X>IH44c>0a0!_Vv(bij%J~Iab#Xx4$H~E&Req z?>&4@Hg>I4*WbB;YPI=pD-g?yBfE6$gg~x`srVfZTrH~ zO9MgAEz4=gr6W?pZXC)(<8v!Wy<8sgo>w+ zqRbAeGbk~~OC)kflM-t^9l4?BvlZzw;^k+bhFRECwHtAjv&~9=?iGAg6vWw-c+SR2 z*fW~b$c}!KXLo7irjVU+Lcaxl7w~Z88~<(ZN*qkeW(kfHx(U+U>!9A6k=JW4T6wcP z>g)pBm|S!OoR>qr+^{5e|iiHkCe&YsP!=59R0p0?!60|s`tL&su+jo z!?;7^)+&sFA6IANu`pH9Qcs>EQ-Se)3-}j0Yj}~n8n07+cV~W}!bT5)3wXB_zaHY5 z_l`j0s5{FcsOCZR7_wn32jUJ+*!t2Li(h)8=M#UiizEx04-I1*kLB}*c~}^I9%TIT zFeFUE%LWcBvroU|qM0~a338E?CQG&rXDW)4ly`gBfXec5yl_~#=xmJQk zbkJCu{rK6Zcs--)LI?FM2#&<%6qjbx^5Ix^_xjF~zs9j&#z(qj((2j`3Ph>^_JV3(?F9gGcx#Dc@~JcG`~PLF(06v~gsBJ`}b-t3b$ z2cSlKOe>H$7^7i7vi387Q0R86xt8}lI`84eit}uw;z^b^qfI_or#DgKNmCt)Y6X>{ zv>__ctYB?=;zL)D5Bb0tJDsxaFI=67^K?2%HUSJ0ET1%WG{5!?ETQDu7@8KlD_B0P zVeT#PZd(?Ya)8cnbCz8abWDhpOGE|#EjNak{SMwX>k6Yp{w<0Mt&=Z_(6JO!CZmH- zJ4#A%^B5*O{UZ3w2L9Q z{0`FI{}poxW|-hZ9W3&i!K|}$0GSJ*Q;s{iTKE?J&N@)+R;Em8RlRZ0H1|~b0z5I= zLGlsUEttc)S2_eA$_E;Q0-918vQ*9mm@Epo&?m0zsZ|uJNJHyz5hDVp28C1Ho-?1F z$i*$=ix-lu%1=g+m2mHLt9JE`Xg%b5?22EwmDrfd^rm5|?mSuMd28r*&!rxDBxGIOpy3pbTJCP0G z-dbu}g=iT~Abx8eleWFI`HSFd#2c_`pppiaW!j!QK#v!hu_R=RMdX604pi;+74F5y zMYeax(djfLKu-Bc^_3%h&LRmG->Zo=*G}VG+{FIP3ux9)5`V(> z64!5%a4Po;*G$UYGbKhHYBa;>F9J^nth8D7*Fw#*_OodfB92-0&gz^6dpTJ{zD^ah zsv^n@s7TB$Xy$_u5 z%ZMa%_slx4ooi$Mva&G6(*A`Su}G3!d9qo ztD;57Wh8GoBe2(@(7*W4~^mB=vFBgCpYs{g;ki?=vhFe}l6tV@3~_?PB+pH3+pi z!w2)W1@~G8q6y-SU7guizSEF3Aq#K%5NO_D7{tdQcmg( zq25cz8kEN}Bh9(zE^fvtqo<;opaf?XhHN*~Emo*q>ZB+t8EKFydvde~Q3tH|sfgMb 
zl;zPnxAv4E+~*qTtH#8jU_qb3lnBUfTQ1gLW*Hb!?r%#T30y$zN&kRlR4ifqF*bzL zW5P0D16LR^ub~#CSd?$lVOg#-h1{gP9*ihK58rtWsUL zpLb_mRyrl}trVhySVYuy^w{)`m^-!kfMTIfYXgU(;%que>=^ZCqAH4++0)iUw22D0 zDsfPWBz&hdoE%LFFp6%nwe<_H<<4#U8ssfrlWo7A%#u38+e%o=lnL6)t^?Q3!2e;$ zyx*)5#*3L|9_)z~u0|NhR>baCec6`P_g@-W7NeAl&%^Fo3i5&irA95k=JOAyO2nbS z)hz`R&E2<*iTkiprAjk#kGBeN*Lw*21()~hpsC7??=h<3jgq#Ouf{TdXZ`NT;E-V) zs?MDq8#^hPr9ax+f&KP2qGG7$5~-)FEPH0p#<_l7{2szzS9Hob6qPm+&l%w|Ywqu? z8b8aho;wYpQmksmu^Y9GDlIX3#-G`pn<#!|zig#ut);WDHaTDS+Ezb6J)n7)&FDF9 zrhV4ZPaL2bq!FO6{k6oZQY>kKNB9kaRSJet^j2N5gg$&FP2N-5DXwIJh%6*XI(IDo zyEKJjt*QH#GO_vCy=xGcCbz*r?F^x-HMC^Hnw@d&G}UH<=*Zw`;5A42O5I|~qS8QE zs=Wpe4k@!qd`SZu%m;p!Z(-FwxwE~@wxy6{QeW@Y)uWJBL7(hk;1ye>WV!q+dCpgZ zFk}qOagL%RG~(R}qq4d%V#?k_mP%A&YVL}Hc8yXi&DDGIQ+I^fshXPEktN1ja&tHj zAF3}n`?F{$4{!W2zKjk<-~~%lNlP`&-BStFqs#dUsi0T`ibX=m#6Li5T~$u)l$9H_ ztEeh8Q9??;eT}?}^cRWVG37@lPYVAcok>rLtT~AYa1Lg+gqz$xByeKk!*Z%;s3>Dz z237FY2t#BxPXj%L{DRzl1x4^$FfuY?-SK3?0ZzG0_U#+tqOl^iGc*HF3W4FIA)NA- zj9)Bi5m6RSjS;rd>dS4W%{F3I-zfQJ%uPKl8L(&Esb{u4GDSs}A%^)^XK*H_=d)I8q= z@gPgQKepGGX>1b<6cS|(ORw9$=lZT~Hb{_|-V7BeOW$@l%D$uxpk}u&@4ua*S}C6` zRD`a1o7>s7l;pNFc>We*Xd`kuc@1x*qL@xg$G))YOowCfnCV?Irt6TOZnl*;?HP}r z?36uHx;k|(+aew;!S`RcE1 z` zZW%7=PORYj#VF}+{O*H=7ijt7cP=l#oWp8UHronQs_GT*WWyeoEw>bJ$wfSFNX=LH zI^N5UJWfI*BH&WbOE z#p)Ne4f7~XAqGVbL?C1%qv)Sl5-n0QzwND@CvbNLqx6<;4s163bag@SFn0uMEv~6S zBxTBr5R5>yWDQysx;A?2S|W@p*NFRtA_>n0p)!FJb3G)XXj_d@fb}y|PF2zZ+MW>u zHida-1{TUM*+BJmPP5~lr6Ab(O;p)ke#j(31l@+?!XLtB!`54ny8ZFeN8xvRmBMIN zB5^9xLH(B8LJ&mf9lvJ(UDL~bpdtnHI8krD)*;QNig?f{vv_1My~@{QCFmiM-Jhdf z651kTx3G}Bm*On+Lh)|j6W#^4$jCh~+(-OK@xvf6V})rj8e|12l5xV;32xcL#Bktg zGEEL4mACF@Ey4W=iJE#(Hor3Buy^CV2G+MvMwNwFq6j;i>k)}1ZraL021OnY9QLqG z-Zm>mxEllsQzhaH;G?NUQkhc3wSDbm&oDG&Km|DJiIZpPE$*-t8pmVeaIG`I%>CSR zx+HaNGXm_5*~mSWGfD(|GerKU!x3`dxP#n~mYD|#LCtR2s`Yv5qX zv(M9&Kd!PD9@LDZX^jgT?7#Qz(Xx(bS57k7jNQ6u zCA1#rQfD4`UA|q+6i{})-5K+cb6j_yz|(wP%*~^nR$USO#hv_iDcT7pQU%nm_2qYc z2w4BOFA+3z&0!gRMqi+jU}UK>^fWROp>r6-essaYu>sUmw(8=5!+x6xdKlaeT504a z*7+wX*UDhnrc5cNK%?nwsRBJtAzQK*mqv)lV5HoF!WvvpY;FQXPZkszQ+F66X6>!Z zz%qA?@P=ZC3V(KSb&%m_o1+{DB^KfnyqoPqUstFKuqY7SSju(AF9;9FBHvI9U#DYh4EFq_LyB^*R zyuH_`=@3=^ED=<%#;;rw%Ba^;C8(iNLp!AgGKpJFLt`{tHBou?nOVoHa|dZ5#v}89 zAUi3{JyNBLu_SYpxe><2!a3DhvJt$Koa>zZ)K%T}_cP^D#3k#LKHfasiHNOB<4N|+ z`=q-Dj7$-;j|{?{Wwu$hY|HUoW{-lGtZRL>Mknw=N+YNB!?}~ES>7(Bz z#8&=MM2?t?fMn=>fC`2&xn?UAl?EI{BBzpY{`8)a%VxFdnc#TlxcEW3OybPZM|0RV zR2C2@|Mh*s58^6{(Npz?0rVI0Uf?rX$4UjC`QuzoZBA_ut=tnS`6)KVB@#!00_*5= z=}fXClwupg7Mxl3$aDH|MPX}G*DrXg9tDBs{RA~)XiEfH4RSE)p}618&0*@D4qQn3 z8WSDzCc_{^A!5e_M0SEuEagt?>9NbvV&WS(@JadW46&HN*s3v#@ggIQ9Ag^p@TL(c zAoD=AstpO?lMADes6`k>YDo&XzKi%`u`?>C8Jh(KOofp9@ElAmpxOs{j>lBZ++-L` znP$eZo+MhSGlGzQ$&RVkdvG-S38uWj+603GTi3N0A=(tARQ`mzR%gD@qB+_-j_0+P z!~4EBq^g^;B8;ajxgL2}W{rsUwwv~yMMu@~#fHr5EL!PhXE5#AFmmK~Ot8CNTPwd8 zsW?HR@aS)A9f-*gmX*tlLd- zaBV8w9`!Eo(WvqDeP0QOheW0E^~G5AIMJ$G?X4rV2m1SAlS(VUob z$^Kh8F;42^1Y0I1Om{SJMop^o!V(=annYvqR_VMMEF!=oQ~Tv6E*a%a&x>Lm6|*#P zzgAklbD8vW+?<3VtU5d#{K9oz33+xy-R_JUsw}E1K!H;~2ev<$iZBUt6e@gRTbh^L zVlyv(K{lnK`w?C)`Gokl*g6&$^T6HF{(f`KeiFMG@z1o3kHwu(0#)%@X8r=MzxHc?rL(cUy^UT8y?J@7c=>uH&Unn( zbKSG(u@ewa+cSLc>zaM7-dgt15!Zf4K%Pv4R7u^6R`|*w9k}1aX{0PEI3cWw-H;ri zRVP|6>J|Rtkj3rPioP*jQ}lAft?TTwRNH8(;iTj>$v5?fa3NZfz!PhO z)WE}Fc4YXag!iVcj|G=N4pv)Ein`{Dk&(~tQpgZ>a#K)I6On&Im}zdfGdbCre?emb zo#>fWgaOJS!72_dS^D38BO4n?}cbEpJ_NF+d zypf+%?pu3tGMcL1vgKf%F!BrS0@#7ID}Za z0r@DLO0iI{(da6<#oB*9muY?`a5nEUD{UY8b+9h8Jj{GKPO&6hQHi)93Amqu@lm!s zN0;k~-Rg24#$MMiKlTVn^I#9>&^~gqG(u@KTAdl4v%3t%gaoW=`U?#3yV_Hg|feHK$R<; 
zW<1X$hG~fOJ$hghDOe}04HL18bYOf;fb0k5UQY71&44_^z*yDT`PZ;s5aT0tuaBVf zdg*U9Eqs$_ImdCXPRi2CR*A*t)JNncA<2p5p;d5`T?C7(ttToYt}8jhBnKs$Ao-1+ z#=cr98q_n+vZlWG#Pi^XY4zPCylF4!v8c-03}@%1WvT%zn2w$`rR;nY6Iuv>vN-gi z{NA;oT1@BoRjy0BTXCOJhLH4APzPE==e&%1@*rp^HVx9H)XdFWV9teXa5E^*4IosY z4gCZlcMqntB1ovp9>%-%vu)<7Z7?js)KZ&G)hUBw)eNt0M=g+Vrx`fcPGt+?u>nOg=ZA{+8yTGqHOUZw~wa(_RP-kKA&Mzj?G>WgMkgs5k)?#b9lKftF z!kzVq^fq9t%;p|ieX@*=tJZ3C8Z_)A$gIu{^c+IgUcUvALn=_{_z?w0UpamBK>?OrOrlqy9>{{2q*^+7n5)76?T>UaTv}5_5pUV(!Tq;jN#ncQZXw@=O zQ*r>C6ic~zAawva(m+Dc13MU7R4sN|%s7)2^OtnEO23L$7A$c~*w^8hS=2}(LE@QQ z;gXPfVhWA#f=*{jGPB6u$<;86mT}!Dp+}^!Or}3odPDT5FXQ-)^@J#NWC&`)Y~5_0 zY@fuME!sSDwT2QEZOgAyuaWmkueXANbH4S6K@#*4Ns9fX`l7oZ%v?+TyTLN)B-rwb z-GS!%%PfcVsxFe82dGlnjrorVZpIVbdvexec?4RM!D@Q%4Avt3(LL#%MJqNBy^$P| zQDs5Xd^qsP+Q3PSqmhbiD{h~@i)svt#HW6lf@MI^3T0X4p}h&`LHc+&np?SkHltyV z^PRQ>w)LT!L(TQ#>&$m!Spb@3g4n^#y$9{^?EM%7O$p?n0i56t)>T{E{ZRSw?~uAh zT9Gzth3Zl~VtP7%fS_q>YF;aQ4>cwt{0=T+u;}cMAGR8Bn?$0XKvkEKV4Fpv80#m$ z7Zu&Y;5QrFP*;pi8&@cUpRD6k+56Uzm#~(gShU(uJzUiUnYH5fG&olx(+na(r#C49 z$Ab_B3pb@ili3&kJBvs^Q^_sCo~LB!Mm^z$T*sc-XV<acFO+0{7ZoPExgbGhAf#PsQ zHFtWq?XgXU?tI;VUJu3i78Th?V(?;6WN5)YkBy9i){n7MURk+kZm7sZT{TuT#U`5w zT~&pK-8ZFdDHC+Mhz4a-h7(qo-bOMAW=$+9RwI5=jZJc^lx@uAT3QiB99CZ-(>86y z;PIP2I&Q>ZySi(Ebxy|BtST7JLxc-C%PVUJx89V3<4(Hc+hX0)6DOxk`tR@(x~!e~ zYZash)axoRpgH{w(8_FwuCqtTDs~OSXnRRW=_G0mC6)QMLlPV16HiUeK;5dZPap+o z8u=aJ%1p_L)M?RU!{2}L8R2JOeM&fhh$z!DU)+b77V`Wk@q*FO5(t=$fP#S3V3!iUXrHwt)w z>>luIXf`QuiYie9HwBReOqm3%wJwf58n^+-g5u9R;(WLi>#Bpgjt;*B*DC}%W@MHw z#Ei|HOx#W2A3;kpCW>Fs=z;|a)F@UD82o0(V7lCFfvG%o_I5FxX4Af!erXlSeQeeI zFzyYo>v8P7D{FW;)BP%I0*?i&ZZpqH^mQqyM^#+YO0)L?=9xFhq_?N(-DDJI8)n`m zz@@ylfp^7;nx(BB%{oEzSLQr@uX7m257R|7c{$OIAOi8p3Sw)hvyg9Y@cQ483 z)_fgnO*HS%f1GZ)fY)nY#PxLBHVLX3%wbrm1TUFFYX~M)nSaEjKLZTxtC1 zmUB(~NrCZ$;qE76wY-8j6Opa2v^A6bjHz!IcqFSngZ?3i%{ECUB-$JGcp^~7ek(#( z=Oayd@em7|GrnOOtkLQr+Yp4QnDuzft37BNK8IerKaOkT3;Tl8;z zW=v+j7dlmgdgV-!=>an&?mdpvL=h<_DZ=Y#S$#`6v>65JbRQuZ4I5%bX@4J>7p7H4ze)==j@5&y(f|fY%}WPmU#6@*+=vB=wiEy+fI{G z#bEkk(^7r>NR9R3z*|80Cc6|DJ6ETGM9`YBPYoiY%721 z-pOLCt-=6SL6Z2agkY!uof2kBIts%vEOmoXjTCKS z%${+ijsIFgJZx7n*C^i++ooT$kD~5Gc+>&bMv}|(ps@iOuUI6YtJUnZT((lklzd>L z>2zN+y>ze8d?z2^S#jWNIIyzI>X+Qiw|x0g&-F=kz2$%e!xVR+3bA)3m(k}W0cRk= zU-l5P4{f&P>y;Kpa<+vXys~bLV%k}!**dwOO9kd+wD=wbHAviZ5J7R%uzFmhz~5dX zHihkfCp7tO2We_XO;VqpKUqT*b?!#Y0uvuks3>pVl#Ib9CTM$9B+_ebk+h*!D9(NC zY9TTxY0lDM6r07DxRLiiS-ab=j_RlfMwDNU(0Ha-xNONUkv!_871HNKf}D1zj907E zxt<8Ay_%&}NvJ|fKWBl1^0!b8Wu?`5LmC30$xQDxt zP`2b~tX=xRx1xJH1c>X}tY4pOHiG8^*d!6g9eD>z)n5}}&(?kivB4O2aes9gBp@mM zLi$hGaMV5}?;S%bO2!6&gp#llD)KljY}|=PGjZS zLKH>D!%D`-5Hl{X2WUPslzv3ulfI>hB<$RHO}9-79=sg>`c|Uo`A?6}6%Cb_vriv_ zG~Jmu+yRO=X@=k^jiYzy_xdLrj={Sm%X4iA!(o`>>^2)B847nN#|qtSASb^LAP8N* z#D5e*h<#32V_9eSx~ta9CVh~`OxX6DJPOk7-TaksH&TLhZ#;YL{7RUj{tGV)5kuri zR1@W;ycMNU&6QJk4W3H_AHmHY_0VHh!mR4Zlh?7>ZjSj=)Ra<%z>0~`ey+QnoIb^;#dUR@+)It^}F&B%zAEMKrNSak}EuK#ST^Ax^ z%d`oKjEf9hGyRM~&z7UHo0j+%>{6d(Ck@M(b=R5a*}1D57uzMKllv5N%&|+<0_H{< zs_JNkdWD{wkZcI?8Iw4a)Ek(_1_u`pgW6!tp*H5Gg!v8A;@VOxBjHZhIAf8-4m3D7Tw zPLMIsT!jmPZ&iN?9#Z?gAj{ae$x5OD_sq8}4_(ajeVt`oZ2MppVm6+`lxMNOW2M08 zl}-NZ`?!=Goysynx?)}F{s?V2%54WxW1b4E;M&f=Xa#MqR_y;)xj&;IvcZe2=jUl} z`e1I*=)Bdszu#HP^u5NCtL=4uOUc`+%zf5!y`2mO!;=-T6Rt_?9Y@d;(G}4Hu*pI4 zz*YRk7SCA;7k=P8Uggj&0Z%2BLZO2%B27%Z@T19qef-2u93V}vXQZ&l)L=-%prI;+AEo<*cR^*l6>zY0xKS6c{Y4BjAH@*#ci3 zE)T?mp@iLQt)vvhk5>W1q~Yy=Vb52Vlh~4X;9UwmA{xBAE~H=AZ=VW=RTY<0G*Vl# zEZwI2cKG)Ba{1o%-GR}9q~(*q)jBUL<@EW!?rcF<0#>y0=s&llak#F!p?{j4$ z@DwdMuhOUXgc0WoN0Zbqyt{x2W$5vvljXb zki>%{y&qP;LSfwHjJi79-WsXgCB<-yT>YQ(hac7!+qWYRj&J=-@3svIxfprXY*FYn 
z3>KZ{tqj)Yk|nJt|Bi!>ez>h#!Ml9L5`+Mj(r5|nA@ckb&r9|z9Wn<@8yrGXmCD85 znwx}Nm={I4_ZLnG&THZP4wk+QI?MTnz9y`hPGRw}4KX3lhPIr(M?_S!`z=ms`}|TG z4bnQGc~q|(t5!FO9{p=+(>HNahAnI5 z`~w{8ViUtMwmn%#N1vMWXWsiM_|+Axz0w8si&YQfS>B~)o>#E*p^$-x34(?lNKUvs zCgSLo@E@#4)GF0eTsFT{SeISzjqkaz@0t6_c*#cQlik_ZFdHb;VHn!>3)m%zG@2_v z|8(k*)rkuF#K&*j3ikJt^dplhDrUbu!=eTK#EX9p>Z_odi4-iGdb?_$baX$?nObl{ zt*mv7D>-}S(3`yEyd`>-be2UlDua!vGGoKCz$uH8^Risxbjb%5e(oM!7E7sU{JQrb z5!A)iv7yLEx-Y8N%+dU9X8QF*4*{z6Flr^Rq1}7B58Ud(0N-uwzDs_4* zBi1C5sDBMyDSOs~TaCR7eTJNGBwYeZ(RD{itnS)A zE?K`J^!sr;7PCRQaaq24*bHp+|1Xp~!e%uecN=NqP=4)#p4zv>?>7Q=Q4&*OEuySg+67+JigUjm>jh^_sUx4=N56GH9<2@k zkxVDxsSJscpr`6TQSJ%ECAiZCdudDM-9P+fv?RWG$-QE-Y`f5Bv~d5Y%C?Z3NpaJ5 zb(3Ym4Pu|;hg@su&UkwKg)4zeO$A6G{;4Wo2Ja7w7ogS2dvfexohGV;q#r^37 z2K~fN)-IS0GG$Vh1v{c8EISXUr(Jl^6`?itf$c<8UJF6%>H_|5`f4UHoY*(VmiMj? z-j0x2Ynjv8Kv7eo{4B`Y{KtgfX7P;L;;!6XyF#N~v9&v$o6^%tkT+3RlnY^IY1tnS#hwW1&sq{! zuT*@Am5TisrWTP6^J(;Y*cV;50Ga-Y9gtZ`LWWWxUxS(*+h-*AG3`&MG(E0UqB;*! z8g4WuoiSF?Ll%IX(*d$Zs(oblT8wUgKjq|T%7~=?(QhOCp7sMZC~z=a-z4124eNof zGe;ApGe6lvPMm0nW5m4O8q~CO<2X|Ij#)UfBCZ;!K_212TihPwV@1stLCM#rMxQtc zAq-2oqOV6?4UV_kqk7prbq2#v^6~H&n!9~(+sq&Xjk`=4GlPY$d45iTC4KD8miS@PV@C4z3^%ZIg`)SM&6#0E&t$cRYFFk5b@n+%;C-L2!# z^m*6qCL>RkFL50&B^NEGBDQtY57ZCdJdb?1-t4_Bwa-SmY}HxTOZMHahkZvo2hMTb zw|=jv6}~0-4t+oBIaRP)`Ng%_x>N3%|JYOAY|UxQ?fjfKIlF0dC5oPb60Kd|+>ff@ z0_Bw?sZNo1$fwCdV+DaK`Q=%q3IoR9hyx3bVuEbh(t`l3-4fc`fdyW5BX(^pP7hSm zfjxJ9U);cr97ZY*8JX#3{9n$+Tg&Yj#eo?bYkPI~UM((o-4oU)g#7twERy_;7uyU zDIEix?}-vvt-R%rAoiksJ-&Bbcr~@uQOkL!(#OFIC&6a6lE!=Vz4_XsetzT5%x7=1 zfkICp3HA^cgz+BwTa^hfIWeYvJ4?g!c0Z!Q_CTWBg?dx1V$8nB@({k>F5*&Y>j{AW zN=La#95D7AllLkYf1SSPU!B^gWPNcYzGPxe>(x4*s*VaHXK|gI?9#UixpmqxnVsiu z(^yu#?Q5&q67&?-HWpqvPh@$nM8IbG?7QVX2RV`Krz6!Y*KtcONAPIc`&TMYjUTFA z`BXpZTYo%2QFnhh_*VjNHE%A0qQVdQv4JqDuTSJbjCqe!0+KMS}A$qJr@btup2`5P>KyG9xmHS7tL9N zd$ydIv^3Y+yx-wo3kR#v)#!NEqe=le`86j(olYtdH;%cxn!iimO zZ2dF7nuvE9{q8G>zHeN3Fg2dsQ*r;Xu+Ry5#j_f(b$96`2=K5=Alje&>g2GMe06f~ z*=Bmq)Ucfer|hcLC+pLJX!p(i;+>Y?1^yG)L*45R+tGA5I_} zW33%envRcz@idnZMkX#emFtV#pLZucPGJ_Dch?n=?g>9X*dPz~^WbVMP?5q4OvLVH z2$Aqi&I8>M&Tk&%w~)`VS;X;o{TR_Vrd1?+?Fw^3GB*boK9d=8SimEf}lf<@Q*qfPLymP@S%bMs3eFgyse&{!VkTXJglP zXlPij=7F~aR%>TmPA66uZ+7gBu(dCbBY*BUOpYJi=c(jtk zS`ye%Z<#t%1N^r-&dkxPro{uWqP$Up$&U}_wto#!ft`fcd0RPS28{=yJ&-w~cDzfA zZ*cUZ^!$tS2L}_Gf!ESMRZmTSor0o6CRME3`;(=L6MP2}imLo)X#?Yf^n>d->|Xv5 zOWsQl*1kZ#Ato&HUFEgT^l0f^`M9=yC~{_5eVOA?bFtMnl#?(AtmAZMy> z(WfAn_eVzh?P2DwM~MyzNyJ<`Oq;ngO^c^AySY9M(dO;IU*Qnm;|ks9xodutA~o|i z*59a@^9xfV&NuJWF}ZQO8nJ%Zl*D?Pux`1u10~zZcrd z8z+pmWrWWe0eTKi3!u#I8W`D8((@yX$IJ835*ZmteJqf5bi1NzxpUZKX% ztMXU)m>-(Sf-Hy9__TbL6>)u?a}=H{3zlvS{GNFvBqilBI5_l4 zeGeQZeYYdCk?Xjf6;nBa4|e+OYhs01ED>S0y=^o3xwZZX)S#ns! 
zoAkxpk}mtq5E{f|27iTPIG{0r8s2euAwZPE3FWw3pGBznoCDFKAj=H46|t>(;R=<7 zTi4oqzTt8631v_9xml-enlJ@x8aAkW>b^4k@VlsbsP3Su?yhZHkCyT^@ zyj8lLt3%zwRt2tY-&%p=$-nTr1WykXLTKUw_y(i>aqQ21y9uuLZTLb0p$u2mZHMtM zgbtZf9tMbA;lMaTRw^lPN$u~4V7p_P=wJB}g~Z{ok<-{tjVNduR6gi>cG7qv6?)NI zWm0o%bWo9AN&LGD3-V$^{y$M;Vt=#7F!Wp_XCE}5h~LN2+XoR?(w%L4g{dj z&p$o%M4|y>|K>?Zqp?^2^uz&Z0M@^IQfQ#le|Q+Q*+2V=0;SM+vwwQx;{TfmKy#)3 zEhho|Pft=54GH^aPD1QIa}p95GS@$H0Dv^`U&iYLvqkf)V1MY|hR)9%(Q}GMO=Ea> zf2eN;Xw()4==SHb#x#`CY%_UjDKTkBI|m0TJ6kCSDTt_yC{Pk2ZZ9Ja6qgpYwTCE> h{QnU&{0=0.3.1 py>=1.4.31 randomize>=0.13 pytest==7.4.4 -allure-pytest==2.13.2 +allure-pytest==2.13.5 pytest-ordering==0.6 pytest-xdist==3.5.0 PyYAML==6.0.1 pytest-assume==2.4.3 -pytest-asyncio==0.23.5 +pytest-asyncio==0.23.6 asyncio==3.4.3 pytest-tornasync>=0.6.0 pytest-trio==0.8.0 -pytest-twisted==1.14.0 +pytest-twisted==1.14.1 Twisted==24.3.0 python-dotenv==1.0.0 -pytest-rerunfailures==13.0 \ No newline at end of file +pytest-rerunfailures==14.0