From e397034dc153095f459a493ecb4a36588c4b8dc7 Mon Sep 17 00:00:00 2001
From: Stan Girard
Date: Mon, 23 Sep 2024 16:14:49 +0200
Subject: [PATCH] feat: add mem0ai-backed conversation memory to the RAG pipeline

---
 .../modules/rag_service/rag_service.py        |  4 +-
 backend/core/pyproject.toml                   |  1 +
 backend/core/quivr_core/prompts.py            |  1 +
 .../core/quivr_core/quivr_rag_langgraph.py    | 52 ++++++++++-
 backend/pyproject.toml                        |  2 +-
 backend/requirements-dev.lock                 | 85 ++++++++++++------
 backend/requirements.lock                     | 86 +++++++++++++------
 7 files changed, 176 insertions(+), 55 deletions(-)

diff --git a/backend/api/quivr_api/modules/rag_service/rag_service.py b/backend/api/quivr_api/modules/rag_service/rag_service.py
index c7aef4aaf4af..3253faa21a91 100644
--- a/backend/api/quivr_api/modules/rag_service/rag_service.py
+++ b/backend/api/quivr_api/modules/rag_service/rag_service.py
@@ -163,7 +163,7 @@ async def generate_answer(
         # Format the history, sanitize the input
         chat_history = self._build_chat_history(history)
 
-        parsed_response = rag_pipeline.answer(question, chat_history, list_files)
+        parsed_response = rag_pipeline.answer(question, chat_history, list_files, str(self.brain.brain_id))
 
         # Save the answer to db
         new_chat_entry = self.save_answer(question, parsed_response)
@@ -212,7 +212,7 @@ async def generate_answer_stream(
         )
         # Initialize the RAG pipeline
         rag_pipeline = QuivrQARAGLangGraph(
-            rag_config=rag_config, llm=llm, vector_store=vector_store
+            rag_config=rag_config, llm=llm, vector_store=vector_store, memory_id=str(self.brain.brain_id)
         )
 
         full_answer = ""
diff --git a/backend/core/pyproject.toml b/backend/core/pyproject.toml
index b8b07ba71e4b..819dbe0313c0 100644
--- a/backend/core/pyproject.toml
+++ b/backend/core/pyproject.toml
@@ -16,6 +16,7 @@ dependencies = [
     "aiofiles>=23.1.0",
     "langchain-community>=0.2.12",
     "langchain-anthropic>=0.1.23",
+    "mem0ai>=0.1.15",
 ]
 readme = "README.md"
 requires-python = ">= 3.11"
diff --git a/backend/core/quivr_core/prompts.py b/backend/core/quivr_core/prompts.py
index eeb573fdd6fc..29d3c07fdaed 100644
--- a/backend/core/quivr_core/prompts.py
+++ b/backend/core/quivr_core/prompts.py
@@ -45,6 +45,7 @@
 If not None, User instruction to follow to answer: {custom_instructions}
 Don't cite the source id in the answer objects, but you can use the source to answer the question.
+{memories}
 """
diff --git a/backend/core/quivr_core/quivr_rag_langgraph.py b/backend/core/quivr_core/quivr_rag_langgraph.py
index f856e52ceac7..50fd1354193e 100644
--- a/backend/core/quivr_core/quivr_rag_langgraph.py
+++ b/backend/core/quivr_core/quivr_rag_langgraph.py
@@ -1,5 +1,9 @@
 import logging
 from typing import Annotated, AsyncGenerator, Optional, Sequence, TypedDict
+import os
+from urllib.parse import urlparse
+from mem0 import Memory
+
 
 # TODO(@aminediro): this is the only dependency to langchain package, we should remove it
 from langchain.retrievers import ContextualCompressionRetriever
@@ -32,6 +35,26 @@
 
 logger = logging.getLogger("quivr_core")
 
+# Read the database URL from the environment; default to "" so importing this
+# module does not crash when the variable is unset
+database_url = os.getenv("PG_DATABASE_ASYNC_URL", "")
+parsed_url = urlparse(database_url)
+
+# Point mem0's vector store at the same Postgres/pgvector instance quivr uses
+config = {
+    "vector_store": {
+        "provider": "pgvector",
+        "config": {
+            "user": parsed_url.username,
+            "password": parsed_url.password,
+            "host": parsed_url.hostname,
+            "port": parsed_url.port,
+            "dbname": parsed_url.path[1:],  # Remove leading '/'
+        },
+    }
+}
+
 
 class AgentState(TypedDict):
     # The add_messages function defines how an update should be processed
@@ -43,6 +66,7 @@ class AgentState(TypedDict):
     transformed_question: BaseMessage
     files: str
     final_response: dict
+    mem0_user_id: str
 
 
 class IdempotentCompressor(BaseDocumentCompressor):
@@ -69,6 +93,7 @@ def __init__(
         llm: LLMEndpoint,
         vector_store: VectorStore,
         reranker: BaseDocumentCompressor | None = None,
+        memory_id: str | None = None,
     ):
         """
         Construct a QuivrQARAGLangGraph object.
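The module-level config above points mem0's pgvector store at the same Postgres database quivr already uses. A minimal sketch of the resulting client, outside this patch: the connection string and brain UUID below are hypothetical, mem0's default OpenAI embedder is assumed to be configured via OPENAI_API_KEY, and the "memory" result field follows the patch's own usage.

import os
from urllib.parse import urlparse

from mem0 import Memory

# Hypothetical connection string, e.g.:
#   PG_DATABASE_ASYNC_URL=postgresql+asyncpg://quivr:secret@localhost:5432/quivr
parsed = urlparse(os.environ["PG_DATABASE_ASYNC_URL"])

memory = Memory.from_config(
    {
        "vector_store": {
            "provider": "pgvector",
            "config": {
                "user": parsed.username,
                "password": parsed.password,
                "host": parsed.hostname,
                "port": parsed.port,
                "dbname": parsed.path.lstrip("/"),
            },
        }
    }
)

# mem0 scopes memories by user_id; the patch passes the brain id as that
# scope, so each brain keeps its own memory namespace.
brain_id = "00000000-0000-0000-0000-000000000000"  # hypothetical brain UUID
memory.add("User: Which currency do we report in?\nAssistant: EUR.", user_id=brain_id)
for hit in memory.search("reporting currency", user_id=brain_id):
    print(hit["memory"])

Reusing the existing Postgres instance keeps memories next to the brain's document vectors instead of introducing a second datastore.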
@@ -87,6 +112,8 @@ def __init__(
         self.compression_retriever = ContextualCompressionRetriever(
             base_compressor=self.reranker, base_retriever=self.retriever
         )
+        self.memory = Memory.from_config(config)
+        self.memory_id = memory_id
 
     @property
     def retriever(self):
@@ -181,15 +208,26 @@ def generate(self, state):
         files = state["files"]
         docs = state["docs"]
+
+        # Recall memories from earlier conversations, scoped to this brain
+        memories = self.memory.search(question, user_id=self.memory_id)
+
+        memories_context = "Memory and relevant information from previous conversations:\n"
+        for memory in memories:
+            memories_context += f"- {memory['memory']}\n"
+        memories_context += "End of memory"
+        logger.debug("Retrieved memories: %s", memories_context)
 
         # Prompt
         prompt = self.rag_config.prompt
 
         final_inputs = {
             "context": combine_documents(docs),
             "question": question,
             "custom_instructions": prompt,
             "files": files,
+            "memories": memories_context,
         }
 
         # LLM
@@ -209,6 +247,11 @@ def generate(self, state):
             "answer": response,  # Assuming the last message contains the final answer
             "docs": docs,
         }
+
+        # Persist this exchange so later questions can recall it
+        result = self.memory.add(
+            f"User: {question}\nAssistant: {response}", user_id=self.memory_id
+        )
+        logger.debug("Added to memory: %s", result)
         return {"messages": [response], "final_response": formatted_response}
 
     def build_langgraph_chain(self):
@@ -266,6 +309,7 @@ def answer(
         question: str,
         history: ChatHistory,
         list_files: list[QuivrKnowledge],
+        mem0_user_id: str | None = None,
         metadata: dict[str, str] = {},
     ) -> ParsedRAGResponse:
         """
@@ -288,6 +332,7 @@ def answer(
             ],
             "chat_history": history,
             "files": concat_list_files,
+            "mem0_user_id": mem0_user_id,
         }
         raw_llm_response = conversational_qa_chain.invoke(
             inputs,
@@ -303,6 +348,7 @@ async def answer_astream(
         question: str,
         history: ChatHistory,
         list_files: list[QuivrKnowledge],
+        mem0_user_id: str | None = None,
         metadata: dict[str, str] = {},
     ) -> AsyncGenerator[ParsedRAGChunkResponse, ParsedRAGChunkResponse]:
         """
@@ -324,6 +370,7 @@ async def answer_astream(
         sources = []
         prev_answer = ""
         chunk_id = 0
+
         async for event in conversational_qa_chain.astream_events(
             {
                 "messages": [
@@ -332,6 +379,7 @@ async def answer_astream(
                 ],
                 "chat_history": history,
                 "files": concat_list_files,
+                "mem0_user_id": mem0_user_id,
             },
             version="v1",
             config={"metadata": metadata},
diff --git a/backend/pyproject.toml b/backend/pyproject.toml
index 0f57a09db2d4..f838c5071600 100644
--- a/backend/pyproject.toml
+++ b/backend/pyproject.toml
@@ -40,7 +40,7 @@ dev-dependencies = [
 ]
 
 [tool.rye.workspace]
-members = [".", "core", "worker", "api", "docs", "core/examples/chatbot"]
+members = [".", "core", "worker", "api", "docs", "core/examples/chatbot", "diff-assistant"]
 
 [tool.hatch.metadata]
 allow-direct-references = true
diff --git a/backend/requirements-dev.lock b/backend/requirements-dev.lock
index c6dec2b92f96..69f1b449d388 100644
--- a/backend/requirements-dev.lock
+++ b/backend/requirements-dev.lock
@@ -124,6 +124,7 @@ click==8.1.7
     # via mkdocs
     # via mkdocstrings
     # via nltk
+    # via python-oxmsg
     # via uvicorn
 click-didyoumean==0.3.1
     # via celery
@@ -287,9 +288,13 @@ griffe==1.2.0
 grpcio==1.65.5
     # via google-api-core
     # via grpcio-status
+    # via grpcio-tools
     # via opentelemetry-exporter-otlp-proto-grpc
+    # via qdrant-client
 grpcio-status==1.62.3
     # via google-api-core
+grpcio-tools==1.62.3
+    # via qdrant-client
 h11==0.14.0
     # via httpcore
     # via uvicorn
@@ -316,6 +321,7 @@ httpx==0.27.0
     # via notion-client
     # via openai
     # via postgrest
+    # via qdrant-client
     # via quivr-core
     # via storage3
     # via supabase
@@ -397,7 +403,7 @@ kiwisolver==1.4.5
     # via matplotlib
 kombu==5.4.0
     # via celery
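Taken together, the quivr_rag_langgraph.py hunks above add a three-step memory round trip to the answer node: recall, answer, remember. Condensed into a standalone sketch (not the patch's code; run_llm is a hypothetical stand-in for the real ANSWER_PROMPT | llm chain):

from mem0 import Memory


def run_llm(context: str, memories: str, question: str) -> str:
    # Hypothetical stand-in for the real prompt + model call.
    return f"(answer to {question!r} from documents and memories)"


def answer_with_memory(memory: Memory, question: str, docs_context: str, user_id: str) -> str:
    # 1. Recall: fetch memories semantically related to the question.
    hits = memory.search(question, user_id=user_id)
    memories_block = "Memory and relevant information from previous conversations:\n"
    memories_block += "".join(f"- {hit['memory']}\n" for hit in hits)
    memories_block += "End of memory"

    # 2. Answer: document context and memory context fill the separate
    # {context} and {memories} placeholders of ANSWER_PROMPT.
    response = run_llm(context=docs_context, memories=memories_block, question=question)

    # 3. Remember: persist the exchange so later questions can recall it.
    memory.add(f"User: {question}\nAssistant: {response}", user_id=user_id)
    return response

Note that both memory.search() and memory.add() run synchronously inside the graph node, so every answer now pays two extra vector-store round trips.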
-langchain==0.2.14
+langchain==0.2.16
     # via langchain-community
     # via megaparse
     # via quivr-api
@@ -409,9 +415,10 @@ langchain-cohere==0.2.2
 langchain-community==0.2.12
     # via langchain-experimental
     # via megaparse
+    # via mem0ai
     # via quivr-api
     # via quivr-core
-langchain-core==0.2.38
+langchain-core==0.2.41
     # via langchain
     # via langchain-anthropic
     # via langchain-cohere
@@ -425,7 +432,7 @@
     # via quivr-core
 langchain-experimental==0.0.64
     # via langchain-cohere
-langchain-openai==0.1.22
+langchain-openai==0.1.25
     # via megaparse
     # via quivr-api
 langchain-text-splitters==0.2.2
@@ -436,7 +443,7 @@ langgraph==0.2.14
     # via quivr-core
 langgraph-checkpoint==1.0.6
     # via langgraph
-langsmith==0.1.100
+langsmith==0.1.125
     # via langchain
     # via langchain-community
     # via langchain-core
@@ -450,14 +457,14 @@ literalai==0.0.607
     # via chainlit
 llama-cloud==0.0.13
     # via llama-index-indices-managed-llama-cloud
-llama-index==0.10.67.post1
+llama-index==0.11.12
     # via megaparse
-llama-index-agent-openai==0.2.9
+llama-index-agent-openai==0.3.4
     # via llama-index
     # via llama-index-program-openai
-llama-index-cli==0.1.13
+llama-index-cli==0.3.1
     # via llama-index
-llama-index-core==0.10.67
+llama-index-core==0.11.12
     # via llama-index
     # via llama-index-agent-openai
     # via llama-index-cli
@@ -470,32 +477,32 @@ llama-index-core==0.10.67
     # via llama-index-readers-file
     # via llama-index-readers-llama-parse
     # via llama-parse
-llama-index-embeddings-openai==0.1.11
+llama-index-embeddings-openai==0.2.5
     # via llama-index
     # via llama-index-cli
-llama-index-indices-managed-llama-cloud==0.2.7
+llama-index-indices-managed-llama-cloud==0.3.1
     # via llama-index
 llama-index-legacy==0.9.48.post3
     # via llama-index
-llama-index-llms-openai==0.1.30
+llama-index-llms-openai==0.2.9
     # via llama-index
     # via llama-index-agent-openai
     # via llama-index-cli
     # via llama-index-multi-modal-llms-openai
     # via llama-index-program-openai
     # via llama-index-question-gen-openai
-llama-index-multi-modal-llms-openai==0.1.9
+llama-index-multi-modal-llms-openai==0.2.1
     # via llama-index
-llama-index-program-openai==0.1.7
+llama-index-program-openai==0.2.0
     # via llama-index
     # via llama-index-question-gen-openai
-llama-index-question-gen-openai==0.1.3
+llama-index-question-gen-openai==0.2.0
     # via llama-index
-llama-index-readers-file==0.1.33
+llama-index-readers-file==0.2.2
     # via llama-index
-llama-index-readers-llama-parse==0.1.6
+llama-index-readers-llama-parse==0.3.0
     # via llama-index
-llama-parse==0.4.9
+llama-parse==0.5.6
     # via llama-index-readers-llama-parse
     # via megaparse
     # via quivr-api
@@ -545,6 +552,8 @@ mdurl==0.1.2
     # via markdown-it-py
 megaparse==0.0.31
     # via quivr-core
+mem0ai==0.1.15
+    # via quivr-core
 mergedeep==1.3.4
     # via mkdocs
     # via mkdocs-get-deps
@@ -596,6 +605,8 @@ nbformat==5.10.4
     # via jupytext
     # via nbclient
     # via nbconvert
+neo4j==5.24.0
+    # via mem0ai
 nest-asyncio==1.6.0
     # via chainlit
     # via ipykernel
@@ -607,6 +618,7 @@ networkx==3.2.1
     # via torch
     # via unstructured
 nltk==3.9.1
+    # via llama-index
     # via llama-index-core
     # via llama-index-legacy
     # via unstructured
@@ -632,12 +644,16 @@ numpy==1.26.3
     # via pdf2docx
     # via pgvector
     # via pycocotools
+    # via qdrant-client
+    # via rank-bm25
     # via scipy
     # via torchvision
     # via transformers
     # via unstructured
 oauthlib==3.2.2
     # via requests-oauthlib
+olefile==0.47
+    # via python-oxmsg
 omegaconf==2.3.0
     # via effdet
 onnx==1.16.2
     # via unstructured-inference
 onnxruntime==1.19.0
     # via unstructured-inference
-openai==1.42.0
+openai==1.47.0
     # via langchain-openai
     # via litellm
     # via llama-index-agent-openai
-    # via llama-index-core
+    # via llama-index-embeddings-openai
     # via llama-index-legacy
     # via llama-index-llms-openai
+    # via mem0ai
     # via quivr-api
     # via quivr-worker
 opencv-python==4.10.0.84
@@ -719,8 +736,8 @@ paginate==0.5.7
 pandas==2.2.2
     # via langchain-cohere
     # via layoutparser
-    # via llama-index-core
     # via llama-index-legacy
+    # via llama-index-readers-file
     # via unstructured
 pandocfilters==1.5.1
     # via nbconvert
@@ -746,6 +763,8 @@ pexpect==4.9.0 ; sys_platform != 'emscripten' and sys_platform != 'win32'
     # via ipython
 pgvector==0.3.2
     # via quivr-api
+pi-heif==0.18.0
+    # via unstructured
 pikepdf==9.1.1
     # via unstructured
 pillow==10.2.0
@@ -755,13 +774,11 @@
     # via matplotlib
     # via pdf2image
     # via pdfplumber
+    # via pi-heif
     # via pikepdf
-    # via pillow-heif
     # via python-pptx
     # via torchvision
     # via unstructured-pytesseract
-pillow-heif==0.18.0
-    # via unstructured
 platformdirs==4.2.2
     # via black
     # via jupyter-core
@@ -778,9 +795,11 @@ ply==3.11
     # via stone
 portalocker==2.10.1
     # via iopath
+    # via qdrant-client
 postgrest==0.16.10
     # via supabase
 posthog==3.5.0
+    # via mem0ai
     # via quivr-api
 pre-commit==3.8.0
 prometheus-client==0.20.0
@@ -796,6 +815,7 @@ protobuf==4.25.4
     # via google-cloud-vision
     # via googleapis-common-protos
     # via grpcio-status
+    # via grpcio-tools
     # via onnx
     # via onnxruntime
     # via opentelemetry-proto
@@ -837,9 +857,12 @@ pydantic==2.8.2
     # via litellm
     # via literalai
     # via llama-cloud
+    # via llama-index-core
+    # via mem0ai
     # via openai
     # via postgrest
     # via pydantic-settings
+    # via qdrant-client
     # via quivr-core
     # via sqlmodel
 pydantic-core==2.20.1
@@ -932,6 +955,8 @@ python-multipart==0.0.9
     # via chainlit
     # via quivr-api
     # via unstructured-inference
+python-oxmsg==0.0.1
+    # via unstructured
 python-pptx==1.0.2
     # via megaparse
     # via unstructured
 python-socketio==5.11.3
     # via chainlit
 pytz==2024.1
     # via flower
+    # via mem0ai
+    # via neo4j
     # via pandas
 pywin32==306 ; (platform_python_implementation != 'PyPy' and sys_platform == 'win32') or platform_system == 'Windows'
     # via jupyter-core
@@ -964,6 +991,10 @@ pyyaml-env-tag==0.1
 pyzmq==26.1.1
     # via ipykernel
     # via jupyter-client
+qdrant-client==1.11.2
+    # via mem0ai
+rank-bm25==0.2.2
+    # via mem0ai
 rapidfuzz==3.9.6
     # via unstructured
     # via unstructured-inference
@@ -1024,6 +1055,7 @@ scipy==1.14.1
 sentry-sdk==2.13.0
     # via quivr-api
 setuptools==70.0.0
+    # via grpcio-tools
     # via opentelemetry-instrumentation
 simple-websocket==1.0.0
     # via python-engineio
@@ -1051,6 +1083,7 @@ sqlalchemy==2.0.32
     # via langchain-community
     # via llama-index-core
     # via llama-index-legacy
+    # via mem0ai
     # via sqlmodel
 sqlmodel==0.0.21
     # via quivr-api
@@ -1173,6 +1206,7 @@ typing-extensions==4.12.2
     # via pydantic-core
     # via pyee
     # via python-docx
+    # via python-oxmsg
     # via python-pptx
     # via realtime
     # via resend
@@ -1192,7 +1226,7 @@ tzdata==2024.1
     # via pandas
 unidecode==1.3.8
     # via quivr-api
-unstructured==0.15.7
+unstructured==0.15.13
     # via megaparse
     # via quivr-core
 unstructured-client==0.6.0
@@ -1205,8 +1239,9 @@ uptrace==1.26.0
     # via chainlit
 uritemplate==4.1.1
     # via google-api-python-client
-urllib3==1.26.13
+urllib3==2.2.3
     # via botocore
+    # via qdrant-client
     # via requests
     # via sentry-sdk
     # via unstructured-client
diff --git a/backend/requirements.lock b/backend/requirements.lock
index 23de6bdc3b63..de857926f865 100644
--- a/backend/requirements.lock
+++ b/backend/requirements.lock
@@ -106,6 +106,7 @@ click==8.1.7
     # via mkdocs
     # via mkdocstrings
     # via nltk
+    # via python-oxmsg
     # via uvicorn
 click-didyoumean==0.3.1
     # via celery
@@ -247,8 +248,12 @@ griffe==1.2.0
 grpcio==1.65.5
     # via google-api-core
     # via grpcio-status
+    # via grpcio-tools
+    # via qdrant-client
 grpcio-status==1.65.5
     # via google-api-core
+grpcio-tools==1.65.5
+    # via qdrant-client
 h11==0.14.0
     # via httpcore
     # via uvicorn
@@ -272,6 +277,7 @@ httpx==0.27.0
     # via notion-client
     # via openai
     # via postgrest
+    # via qdrant-client
     # via quivr-core
     # via storage3
     # via supabase
@@ -348,7 +354,7 @@ kiwisolver==1.4.5
     # via matplotlib
 kombu==5.4.0
     # via celery
-langchain==0.2.14
+langchain==0.2.16
     # via langchain-community
     # via megaparse
     # via quivr-api
@@ -360,9 +366,10 @@ langchain-cohere==0.2.2
 langchain-community==0.2.12
     # via langchain-experimental
     # via megaparse
+    # via mem0ai
     # via quivr-api
     # via quivr-core
-langchain-core==0.2.38
+langchain-core==0.2.41
     # via langchain
     # via langchain-anthropic
     # via langchain-cohere
@@ -376,7 +383,7 @@
     # via quivr-core
 langchain-experimental==0.0.64
     # via langchain-cohere
-langchain-openai==0.1.22
+langchain-openai==0.1.25
     # via megaparse
     # via quivr-api
 langchain-text-splitters==0.2.2
@@ -387,7 +394,7 @@ langgraph==0.2.19
     # via quivr-core
 langgraph-checkpoint==1.0.9
     # via langgraph
-langsmith==0.1.100
+langsmith==0.1.125
     # via langchain
     # via langchain-community
     # via langchain-core
@@ -397,14 +404,14 @@ litellm==1.43.19
     # via quivr-api
 llama-cloud==0.0.13
     # via llama-index-indices-managed-llama-cloud
-llama-index==0.10.67.post1
+llama-index==0.11.12
     # via megaparse
-llama-index-agent-openai==0.2.9
+llama-index-agent-openai==0.3.4
     # via llama-index
     # via llama-index-program-openai
-llama-index-cli==0.1.13
+llama-index-cli==0.3.1
     # via llama-index
-llama-index-core==0.10.67
+llama-index-core==0.11.12
     # via llama-index
     # via llama-index-agent-openai
     # via llama-index-cli
@@ -417,32 +424,32 @@
     # via llama-index-readers-file
     # via llama-index-readers-llama-parse
     # via llama-parse
-llama-index-embeddings-openai==0.1.11
+llama-index-embeddings-openai==0.2.5
     # via llama-index
     # via llama-index-cli
-llama-index-indices-managed-llama-cloud==0.2.7
+llama-index-indices-managed-llama-cloud==0.3.1
     # via llama-index
 llama-index-legacy==0.9.48.post3
     # via llama-index
-llama-index-llms-openai==0.1.30
+llama-index-llms-openai==0.2.9
     # via llama-index
     # via llama-index-agent-openai
     # via llama-index-cli
     # via llama-index-multi-modal-llms-openai
     # via llama-index-program-openai
     # via llama-index-question-gen-openai
-llama-index-multi-modal-llms-openai==0.1.9
+llama-index-multi-modal-llms-openai==0.2.1
     # via llama-index
-llama-index-program-openai==0.1.7
+llama-index-program-openai==0.2.0
     # via llama-index
     # via llama-index-question-gen-openai
-llama-index-question-gen-openai==0.1.3
+llama-index-question-gen-openai==0.2.0
     # via llama-index
-llama-index-readers-file==0.1.33
+llama-index-readers-file==0.2.2
     # via llama-index
-llama-index-readers-llama-parse==0.1.6
+llama-index-readers-llama-parse==0.3.0
     # via llama-index
-llama-parse==0.4.9
+llama-parse==0.5.6
     # via llama-index-readers-llama-parse
     # via megaparse
     # via quivr-api
@@ -490,6 +497,8 @@ mdurl==0.1.2
     # via markdown-it-py
 megaparse==0.0.31
     # via quivr-core
+mem0ai==0.1.15
+    # via quivr-core
 mergedeep==1.3.4
     # via mkdocs
     # via mkdocs-get-deps
@@ -538,6 +547,8 @@ nbformat==5.10.4
     # via jupytext
     # via nbclient
     # via nbconvert
+neo4j==5.24.0
+    # via mem0ai
 nest-asyncio==1.6.0
     # via ipykernel
     # via llama-index-core
@@ -548,6 +559,7 @@ networkx==3.2.1
     # via torch
     # via unstructured
 nltk==3.9.1
+    # via llama-index
     # via llama-index-core
     # via llama-index-legacy
     # via unstructured
@@ -570,12 +582,16 @@ numpy==1.26.3
     # via pdf2docx
     # via pgvector
     # via pycocotools
+    # via qdrant-client
+    # via rank-bm25
     # via scipy
     # via torchvision
     # via transformers
     # via unstructured
 oauthlib==3.2.2
     # via requests-oauthlib
+olefile==0.47
+    # via python-oxmsg
 omegaconf==2.3.0
     # via effdet
 onnx==1.16.2
@@ -583,13 +599,14 @@ onnx==1.16.2
     # via unstructured-inference
 onnxruntime==1.19.0
     # via unstructured-inference
-openai==1.42.0
+openai==1.47.0
     # via langchain-openai
     # via litellm
     # via llama-index-agent-openai
-    # via llama-index-core
+    # via llama-index-embeddings-openai
     # via llama-index-legacy
     # via llama-index-llms-openai
+    # via mem0ai
     # via quivr-api
     # via quivr-worker
 opencv-python==4.10.0.84
@@ -623,8 +640,8 @@ paginate==0.5.7
 pandas==2.2.2
     # via langchain-cohere
     # via layoutparser
-    # via llama-index-core
     # via llama-index-legacy
+    # via llama-index-readers-file
     # via unstructured
 pandocfilters==1.5.1
     # via nbconvert
@@ -649,6 +666,8 @@ pexpect==4.9.0 ; sys_platform != 'emscripten' and sys_platform != 'win32'
     # via ipython
 pgvector==0.3.2
     # via quivr-api
+pi-heif==0.18.0
+    # via unstructured
 pikepdf==9.1.1
     # via unstructured
 pillow==10.2.0
@@ -658,13 +677,11 @@
     # via matplotlib
     # via pdf2image
     # via pdfplumber
+    # via pi-heif
     # via pikepdf
-    # via pillow-heif
     # via python-pptx
     # via torchvision
     # via unstructured-pytesseract
-pillow-heif==0.18.0
-    # via unstructured
 platformdirs==4.3.2
     # via jupyter-core
     # via mkdocs-get-deps
@@ -675,9 +692,11 @@ ply==3.11
     # via stone
 portalocker==2.10.1
     # via iopath
+    # via qdrant-client
 postgrest==0.16.10
     # via supabase
 posthog==3.5.0
+    # via mem0ai
     # via quivr-api
 prometheus-client==0.20.0
     # via flower
@@ -692,6 +711,7 @@ protobuf==5.27.3
     # via google-cloud-vision
     # via googleapis-common-protos
     # via grpcio-status
+    # via grpcio-tools
     # via onnx
     # via onnxruntime
     # via proto-plus
@@ -726,9 +746,12 @@ pydantic==2.8.2
     # via langsmith
     # via litellm
     # via llama-cloud
+    # via llama-index-core
+    # via mem0ai
     # via openai
     # via postgrest
     # via pydantic-settings
+    # via qdrant-client
     # via quivr-core
     # via sqlmodel
 pydantic-core==2.20.1
@@ -798,11 +821,15 @@ python-magic==0.4.27
 python-multipart==0.0.9
     # via quivr-api
     # via unstructured-inference
+python-oxmsg==0.0.1
+    # via unstructured
 python-pptx==1.0.2
     # via megaparse
     # via unstructured
 pytz==2024.1
     # via flower
+    # via mem0ai
+    # via neo4j
     # via pandas
 pywin32==306 ; (platform_python_implementation != 'PyPy' and sys_platform == 'win32') or platform_system == 'Windows'
     # via jupyter-core
@@ -827,6 +854,10 @@ pyyaml-env-tag==0.1
 pyzmq==26.2.0
     # via ipykernel
     # via jupyter-client
+qdrant-client==1.11.2
+    # via mem0ai
+rank-bm25==0.2.2
+    # via mem0ai
 rapidfuzz==3.9.6
     # via unstructured
     # via unstructured-inference
@@ -884,6 +915,8 @@ scipy==1.14.1
     # via layoutparser
 sentry-sdk==2.13.0
     # via quivr-api
+setuptools==70.0.0
+    # via grpcio-tools
 six==1.16.0
     # via asttokens
     # via bleach
@@ -908,6 +941,7 @@ sqlalchemy==2.0.32
     # via langchain-community
     # via llama-index-core
     # via llama-index-legacy
+    # via mem0ai
     # via sqlmodel
 sqlmodel==0.0.21
     # via quivr-api
@@ -1022,6 +1056,7 @@ typing-extensions==4.12.2
     # via pydantic-core
     # via pyee
     # via python-docx
+    # via python-oxmsg
     # via python-pptx
     # via realtime
     # via resend
@@ -1041,7 +1076,7 @@ tzdata==2024.1
     # via pandas
 unidecode==1.3.8
     # via quivr-api
-unstructured==0.15.7
+unstructured==0.15.13
     # via megaparse
     # via quivr-core
 unstructured-client==0.8.1
@@ -1052,8 +1087,9 @@ unstructured-pytesseract==0.3.13
     # via unstructured
 uritemplate==4.1.1
     # via google-api-python-client
-urllib3==1.26.13
+urllib3==2.2.3
     # via botocore
+    # via qdrant-client
     # via requests
     # via sentry-sdk
     # via unstructured-client
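The lock-file churn above is mostly mem0ai's transitive dependencies (qdrant-client, neo4j, rank-bm25, grpcio-tools, olefile, posthog) together with the langchain, llama-index, openai, unstructured and urllib3 version bumps that accompany them. A quick sanity check after `rye sync`, not part of the patch:

from importlib.metadata import version

# Spot-check that the environment resolved to the locked versions,
# e.g. mem0ai==0.1.15 and langchain==0.2.16 per the lock files above.
for pkg in ("mem0ai", "langchain", "langchain-core", "langchain-openai",
            "openai", "qdrant-client", "unstructured"):
    print(f"{pkg}=={version(pkg)}")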