diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 89127885a..691ad663e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -12,11 +12,16 @@ repos: rev: 24.4.2 hooks: - id: black - args: ["--line-length=88"] + args: ['--line-length=88'] - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.4.2 hooks: # Run the linter. - id: ruff - args: ["--fix", "--extend-ignore=E402"] + args: ['--fix', '--extend-ignore=E402'] + # - repo: https://github.com/pycqa/flake8 + # rev: 4.0.1 + # hooks: + # - id: flake8 + # args: ['--max-line-length=88'] diff --git a/lightrag/__init__.py b/_lightrag/lightrag/__init__.py similarity index 100% rename from lightrag/__init__.py rename to _lightrag/lightrag/__init__.py diff --git a/developer_notes/generator.ipynb b/developer_notes/generator.ipynb index 548ca532e..ff804b223 100644 --- a/developer_notes/generator.ipynb +++ b/developer_notes/generator.ipynb @@ -88,7 +88,7 @@ "source": [ "from lightrag.core import Component, Generator, Prompt\n", "from lightrag.components.model_client import GroqAPIClient\n", - "from lightrag.utils import setup_env\n", + "from lightrag.utils import setup_env # noqa\n", "\n", "\n", "class SimpleQA(Component):\n", diff --git a/developer_notes/generator_note.py b/developer_notes/generator_note.py index 5d05f31a7..148470323 100644 --- a/developer_notes/generator_note.py +++ b/developer_notes/generator_note.py @@ -1,6 +1,5 @@ from lightrag.core import Component, Generator from lightrag.components.model_client import GroqAPIClient -from lightrag.utils import setup_env # noqa class SimpleQA(Component): diff --git a/docs/.gitignore b/docs/.gitignore index 9f2cc5a90..b72c4b2c0 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -1,15 +1,15 @@ -source/apis/components/components* -source/apis/components/_autosummary* -source/apis/components/modules* -source/apis/core/core* -source/apis/core/modules* -source/apis/eval/eval* -source/apis/eval/modules* -source/apis/prompts/prompts* -source/apis/prompts/modules* -source/apis/utils/utils* -source/apis/utils/modules* -source/apis/tracing/tracing* -source/apis/tracing/modules* -source/apis/optim/optim* -source/apis/optim/modules* \ No newline at end of file +# source/apis/components/components* +# source/apis/components/_autosummary* +# source/apis/components/modules* +# source/apis/core/core* +# source/apis/core/modules* +# source/apis/eval/eval* +# source/apis/eval/modules* +# source/apis/prompts/prompts* +# source/apis/prompts/modules* +# source/apis/utils/utils* +# source/apis/utils/modules* +# source/apis/tracing/tracing* +# source/apis/tracing/modules* +# source/apis/optim/optim* +# source/apis/optim/modules* diff --git a/docs/Makefile b/docs/Makefile index cb659334c..3b198b52e 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -22,12 +22,12 @@ help: @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) apidoc: - @sphinx-apidoc -o $(APIDOCOUTDIR)/core ../lightrag/core --separate --force - @sphinx-apidoc -o $(APIDOCOUTDIR)/components ../lightrag/components --separate --force --templatedir=$(SOURCEDIR)/_templates - @sphinx-apidoc -o $(APIDOCOUTDIR)/eval ../lightrag/eval --separate --force - @sphinx-apidoc -o $(APIDOCOUTDIR)/optim ../lightrag/optim --separate --force - @sphinx-apidoc -o $(APIDOCOUTDIR)/utils ../lightrag/utils --separate --force - @sphinx-apidoc -o $(APIDOCOUTDIR)/tracing ../lightrag/tracing --separate --force + @sphinx-apidoc -o $(APIDOCOUTDIR)/core ../lightrag/lightrag/core --separate --force + @sphinx-apidoc -o 
$(APIDOCOUTDIR)/components ../lightrag/lightrag/components --separate --force --templatedir=$(SOURCEDIR)/_templates + @sphinx-apidoc -o $(APIDOCOUTDIR)/eval ../lightrag/lightrag/eval --separate --force + @sphinx-apidoc -o $(APIDOCOUTDIR)/optim ../lightrag/lightrag/optim --separate --force + @sphinx-apidoc -o $(APIDOCOUTDIR)/utils ../lightrag/lightrag/utils --separate --force + @sphinx-apidoc -o $(APIDOCOUTDIR)/tracing ../lightrag/lightrag/tracing --separate --force @echo "Inserting reference labels into RST files." @python $(SOURCEDIR)/insert_labels.py @echo "Removing unnecessary strings for better formatting" @@ -38,4 +38,4 @@ apidoc: html: apidoc - @$(SPHINXBUILD) -b html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file + @$(SPHINXBUILD) -b html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/source/_static/css/custom.css b/docs/source/_static/css/custom.css index be7a7db05..a97701dd3 100644 --- a/docs/source/_static/css/custom.css +++ b/docs/source/_static/css/custom.css @@ -20,6 +20,9 @@ width: 270px; /* Adjust the width for larger screens */ } } +.pre { + color: #0a7d91; /* Change the color of ```` blocks */ +} /* .copyright { text-align: center; diff --git a/docs/source/apis/components/_autosummary/components.agent.react.rst b/docs/source/apis/components/_autosummary/components.agent.react.rst new file mode 100644 index 000000000..9c0d4e939 --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.agent.react.rst @@ -0,0 +1,20 @@ +components.agent.react +====================== + +.. automodule:: components.agent.react + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + + ReActAgent diff --git a/docs/source/apis/components/_autosummary/components.api_client.anthropic_client.rst b/docs/source/apis/components/_autosummary/components.api_client.anthropic_client.rst new file mode 100644 index 000000000..31e030aa4 --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.api_client.anthropic_client.rst @@ -0,0 +1,22 @@ +.. _components-api_client-anthropic_client: + +components.api\_client.anthropic\_client +======================================== + +.. automodule:: components.api_client.anthropic_client + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + + AnthropicAPIClient diff --git a/docs/source/apis/components/_autosummary/components.api_client.google_client.rst b/docs/source/apis/components/_autosummary/components.api_client.google_client.rst new file mode 100644 index 000000000..8da90bc60 --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.api_client.google_client.rst @@ -0,0 +1,22 @@ +.. _components-api_client-google_client: + +components.api\_client.google\_client +===================================== + +.. automodule:: components.api_client.google_client + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + + GoogleGenAIClient diff --git a/docs/source/apis/components/_autosummary/components.api_client.groq_client.rst b/docs/source/apis/components/_autosummary/components.api_client.groq_client.rst new file mode 100644 index 000000000..ccaab3bdf --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.api_client.groq_client.rst @@ -0,0 +1,22 @@ +.. _components-api_client-groq_client: + +components.api\_client.groq\_client +=================================== + +.. automodule:: components.api_client.groq_client + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + + GroqAPIClient diff --git a/docs/source/apis/components/_autosummary/components.api_client.openai_client.rst b/docs/source/apis/components/_autosummary/components.api_client.openai_client.rst new file mode 100644 index 000000000..4c14edecf --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.api_client.openai_client.rst @@ -0,0 +1,22 @@ +.. _components-api_client-openai_client: + +components.api\_client.openai\_client +===================================== + +.. automodule:: components.api_client.openai_client + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + + OpenAIClient diff --git a/docs/source/apis/components/_autosummary/components.api_client.transformers_client.rst b/docs/source/apis/components/_autosummary/components.api_client.transformers_client.rst new file mode 100644 index 000000000..0398fa6f4 --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.api_client.transformers_client.rst @@ -0,0 +1,29 @@ +.. _components-api_client-transformers_client: + +components.api\_client.transformers\_client +=========================================== + +.. automodule:: components.api_client.transformers_client + + + + + + + + .. rubric:: Functions + + .. autosummary:: + + average_pool + + + + + + .. rubric:: Classes + + .. autosummary:: + + TransformerEmbedder + TransformersClient diff --git a/docs/source/apis/components/_autosummary/components.data_process.data_components.rst b/docs/source/apis/components/_autosummary/components.data_process.data_components.rst new file mode 100644 index 000000000..4482fe534 --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.data_process.data_components.rst @@ -0,0 +1,27 @@ +components.data\_process.data\_components +========================================= + +.. automodule:: components.data_process.data_components + + + + + + + + .. rubric:: Functions + + .. autosummary:: + + retriever_output_to_context_str + + + + + + .. rubric:: Classes + + .. autosummary:: + + RetrieverOutputToContextStr + ToEmbeddings diff --git a/docs/source/apis/components/_autosummary/components.data_process.document_splitter.rst b/docs/source/apis/components/_autosummary/components.data_process.document_splitter.rst new file mode 100644 index 000000000..205e73fcf --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.data_process.document_splitter.rst @@ -0,0 +1,28 @@ +.. _components-data_process-document_splitter: + +components.data\_process.document\_splitter +=========================================== + +.. automodule:: components.data_process.document_splitter + + + + + + + + .. rubric:: Functions + + .. autosummary:: + + split_text_by_token_fn + + + + + + .. rubric:: Classes + + .. autosummary:: + + DocumentSplitter diff --git a/docs/source/apis/components/_autosummary/components.data_process.text_splitter.rst b/docs/source/apis/components/_autosummary/components.data_process.text_splitter.rst new file mode 100644 index 000000000..63a89e71c --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.data_process.text_splitter.rst @@ -0,0 +1,20 @@ +components.data\_process.text\_splitter +======================================= + +.. automodule:: components.data_process.text_splitter + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + + TextSplitter diff --git a/docs/source/apis/components/_autosummary/components.memory.memory.rst b/docs/source/apis/components/_autosummary/components.memory.memory.rst new file mode 100644 index 000000000..0338de4da --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.memory.memory.rst @@ -0,0 +1,20 @@ +components.memory.memory +======================== + +.. automodule:: components.memory.memory + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + + Memory diff --git a/docs/source/apis/components/_autosummary/components.model_client.anthropic_client.rst b/docs/source/apis/components/_autosummary/components.model_client.anthropic_client.rst new file mode 100644 index 000000000..3c6b45a71 --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.model_client.anthropic_client.rst @@ -0,0 +1,20 @@ +components.model\_client.anthropic\_client +========================================== + +.. automodule:: components.model_client.anthropic_client + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + + AnthropicAPIClient diff --git a/docs/source/apis/components/_autosummary/components.model_client.cohere_client.rst b/docs/source/apis/components/_autosummary/components.model_client.cohere_client.rst new file mode 100644 index 000000000..03c29e465 --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.model_client.cohere_client.rst @@ -0,0 +1,20 @@ +components.model\_client.cohere\_client +======================================= + +.. automodule:: components.model_client.cohere_client + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + + CohereAPIClient diff --git a/docs/source/apis/components/_autosummary/components.model_client.google_client.rst b/docs/source/apis/components/_autosummary/components.model_client.google_client.rst new file mode 100644 index 000000000..744dc3bc9 --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.model_client.google_client.rst @@ -0,0 +1,22 @@ +.. _components-model_client-google_client: + +components.model\_client.google\_client +======================================= + +.. automodule:: components.model_client.google_client + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + + GoogleGenAIClient diff --git a/docs/source/apis/components/_autosummary/components.model_client.groq_client.rst b/docs/source/apis/components/_autosummary/components.model_client.groq_client.rst new file mode 100644 index 000000000..02650f40e --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.model_client.groq_client.rst @@ -0,0 +1,20 @@ +components.model\_client.groq\_client +===================================== + +.. automodule:: components.model_client.groq_client + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + + GroqAPIClient diff --git a/docs/source/apis/components/_autosummary/components.model_client.openai_client.rst b/docs/source/apis/components/_autosummary/components.model_client.openai_client.rst new file mode 100644 index 000000000..9b8bc7e76 --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.model_client.openai_client.rst @@ -0,0 +1,28 @@ +components.model\_client.openai\_client +======================================= + +.. automodule:: components.model_client.openai_client + + + + + + + + .. rubric:: Functions + + .. autosummary:: + + get_all_messages_content + get_first_message_content + get_probabilities + + + + + + .. rubric:: Classes + + .. 
autosummary:: + + OpenAIClient diff --git a/docs/source/apis/components/_autosummary/components.model_client.transformers_client.rst b/docs/source/apis/components/_autosummary/components.model_client.transformers_client.rst new file mode 100644 index 000000000..cb1c9d331 --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.model_client.transformers_client.rst @@ -0,0 +1,29 @@ +components.model\_client.transformers\_client +============================================= + +.. automodule:: components.model_client.transformers_client + + + + + + + + .. rubric:: Functions + + .. autosummary:: + + average_pool + + + + + + .. rubric:: Classes + + .. autosummary:: + + TransformerEmbedder + TransformerLLM + TransformerReranker + TransformersClient diff --git a/docs/source/apis/components/_autosummary/components.model_client.utils.rst b/docs/source/apis/components/_autosummary/components.model_client.utils.rst new file mode 100644 index 000000000..7d7f919e4 --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.model_client.utils.rst @@ -0,0 +1,16 @@ +components.model\_client.utils +============================== + +.. automodule:: components.model_client.utils + + + + + + + + .. rubric:: Functions + + .. autosummary:: + + parse_embedding_response diff --git a/docs/source/apis/components/_autosummary/components.output_parsers.outputs.rst b/docs/source/apis/components/_autosummary/components.output_parsers.outputs.rst new file mode 100644 index 000000000..797976da1 --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.output_parsers.outputs.rst @@ -0,0 +1,24 @@ +components.output\_parsers.outputs +================================== + +.. automodule:: components.output_parsers.outputs + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + + BooleanOutputParser + JsonOutputParser + ListOutputParser + OutputParser + YamlOutputParser diff --git a/docs/source/apis/components/_autosummary/components.reasoning.chain_of_thought.rst b/docs/source/apis/components/_autosummary/components.reasoning.chain_of_thought.rst new file mode 100644 index 000000000..665486a7f --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.reasoning.chain_of_thought.rst @@ -0,0 +1,4 @@ +components.reasoning.chain\_of\_thought +======================================= + +.. automodule:: components.reasoning.chain_of_thought diff --git a/docs/source/apis/components/_autosummary/components.retriever.bm25_retriever.rst b/docs/source/apis/components/_autosummary/components.retriever.bm25_retriever.rst new file mode 100644 index 000000000..6f869f553 --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.retriever.bm25_retriever.rst @@ -0,0 +1,28 @@ +components.retriever.bm25\_retriever +==================================== + +.. automodule:: components.retriever.bm25_retriever + + + + + + + + .. rubric:: Functions + + .. autosummary:: + + split_text_by_word_fn + split_text_by_word_fn_then_lower_tokenized + split_text_tokenized + + + + + + .. rubric:: Classes + + .. autosummary:: + + BM25Retriever diff --git a/docs/source/apis/components/_autosummary/components.retriever.faiss_retriever.rst b/docs/source/apis/components/_autosummary/components.retriever.faiss_retriever.rst new file mode 100644 index 000000000..cad15914a --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.retriever.faiss_retriever.rst @@ -0,0 +1,20 @@ +components.retriever.faiss\_retriever +===================================== + +.. 
automodule:: components.retriever.faiss_retriever + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + + FAISSRetriever diff --git a/docs/source/apis/components/_autosummary/components.retriever.llm_retriever.rst b/docs/source/apis/components/_autosummary/components.retriever.llm_retriever.rst new file mode 100644 index 000000000..d8c9c6c6b --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.retriever.llm_retriever.rst @@ -0,0 +1,20 @@ +components.retriever.llm\_retriever +=================================== + +.. automodule:: components.retriever.llm_retriever + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + + LLMRetriever diff --git a/docs/source/apis/components/_autosummary/components.retriever.postgres_retriever.rst b/docs/source/apis/components/_autosummary/components.retriever.postgres_retriever.rst new file mode 100644 index 000000000..6a8442036 --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.retriever.postgres_retriever.rst @@ -0,0 +1,23 @@ +.. _components-retriever-postgres_retriever: + +components.retriever.postgres\_retriever +======================================== + +.. automodule:: components.retriever.postgres_retriever + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + + DistanceToOperator + PostgresRetriever diff --git a/docs/source/apis/components/_autosummary/components.retriever.reranker_retriever.rst b/docs/source/apis/components/_autosummary/components.retriever.reranker_retriever.rst new file mode 100644 index 000000000..3231d4a68 --- /dev/null +++ b/docs/source/apis/components/_autosummary/components.retriever.reranker_retriever.rst @@ -0,0 +1,20 @@ +components.retriever.reranker\_retriever +======================================== + +.. automodule:: components.retriever.reranker_retriever + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + + RerankerRetriever diff --git a/docs/source/apis/components/components.agent.rst b/docs/source/apis/components/components.agent.rst new file mode 100644 index 000000000..3bea7354d --- /dev/null +++ b/docs/source/apis/components/components.agent.rst @@ -0,0 +1,27 @@ +.. _components-agent: + +components.agent +======================== + +Submodules +---------- +.. autosummary:: + :toctree: _autosummary + + + components.agent.react + + + +.. toctree:: + :maxdepth: 4 + + components.agent.react + + +--------------- + +.. automodule:: components.agent + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/components/components.data_process.data_components.rst b/docs/source/apis/components/components.data_process.data_components.rst new file mode 100644 index 000000000..c567436c1 --- /dev/null +++ b/docs/source/apis/components/components.data_process.data_components.rst @@ -0,0 +1,9 @@ +.. _components-data_process-data_components: + +components.data\_process.data\_components +================================================ + +.. automodule:: components.data_process.data_components + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/components/components.data_process.document_splitter.rst b/docs/source/apis/components/components.data_process.document_splitter.rst new file mode 100644 index 000000000..8d9c0f3a5 --- /dev/null +++ b/docs/source/apis/components/components.data_process.document_splitter.rst @@ -0,0 +1,9 @@ +.. _components-data_process-document_splitter: + +components.data\_process.document\_splitter +================================================== + +.. 
automodule:: components.data_process.document_splitter + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/components/components.data_process.rst b/docs/source/apis/components/components.data_process.rst new file mode 100644 index 000000000..1b444e9ff --- /dev/null +++ b/docs/source/apis/components/components.data_process.rst @@ -0,0 +1,30 @@ +.. _components-data_process: + +components.data\_process +================================ + +Submodules +---------- +.. autosummary:: + :toctree: _autosummary + + + components.data_process.data_components + + components.data_process.text_splitter + + + +.. toctree:: + :maxdepth: 4 + + components.data_process.data_components + components.data_process.text_splitter + + +--------------- + +.. automodule:: components.data_process + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/components/components.data_process.text_splitter.rst b/docs/source/apis/components/components.data_process.text_splitter.rst new file mode 100644 index 000000000..a5ceea863 --- /dev/null +++ b/docs/source/apis/components/components.data_process.text_splitter.rst @@ -0,0 +1,9 @@ +.. _components-data_process-text_splitter: + +components.data\_process.text\_splitter +============================================== + +.. automodule:: components.data_process.text_splitter + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/components/components.memory.memory.rst b/docs/source/apis/components/components.memory.memory.rst new file mode 100644 index 000000000..d6ca50513 --- /dev/null +++ b/docs/source/apis/components/components.memory.memory.rst @@ -0,0 +1,9 @@ +.. _components-memory-memory: + +components.memory.memory +=============================== + +.. automodule:: components.memory.memory + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/components/components.memory.rst b/docs/source/apis/components/components.memory.rst new file mode 100644 index 000000000..4505a1851 --- /dev/null +++ b/docs/source/apis/components/components.memory.rst @@ -0,0 +1,27 @@ +.. _components-memory: + +components.memory +========================= + +Submodules +---------- +.. autosummary:: + :toctree: _autosummary + + + components.memory.memory + + + +.. toctree:: + :maxdepth: 4 + + components.memory.memory + + +--------------- + +.. automodule:: components.memory + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/components/components.model_client.rst b/docs/source/apis/components/components.model_client.rst new file mode 100644 index 000000000..a8fb35d8b --- /dev/null +++ b/docs/source/apis/components/components.model_client.rst @@ -0,0 +1,45 @@ +.. _components-model_client: + +components.model\_client +================================ + +Submodules +---------- +.. autosummary:: + :toctree: _autosummary + + + components.model_client.anthropic_client + + components.model_client.cohere_client + + components.model_client.google_client + + components.model_client.groq_client + + components.model_client.openai_client + + components.model_client.transformers_client + + components.model_client.utils + + + +.. toctree:: + :maxdepth: 4 + + components.model_client.anthropic_client + components.model_client.cohere_client + components.model_client.google_client + components.model_client.groq_client + components.model_client.openai_client + components.model_client.transformers_client + components.model_client.utils + + +--------------- + +.. 
automodule:: components.model_client + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/components/components.model_client.utils.rst b/docs/source/apis/components/components.model_client.utils.rst new file mode 100644 index 000000000..53ae77d2e --- /dev/null +++ b/docs/source/apis/components/components.model_client.utils.rst @@ -0,0 +1,9 @@ +.. _components-model_client-utils: + +components.model\_client.utils +===================================== + +.. automodule:: components.model_client.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/components/components.output_parsers.rst b/docs/source/apis/components/components.output_parsers.rst new file mode 100644 index 000000000..729afabdc --- /dev/null +++ b/docs/source/apis/components/components.output_parsers.rst @@ -0,0 +1,27 @@ +.. _components-output_parsers: + +components.output\_parsers +================================== + +Submodules +---------- +.. autosummary:: + :toctree: _autosummary + + + components.output_parsers.outputs + + + +.. toctree:: + :maxdepth: 4 + + components.output_parsers.outputs + + +--------------- + +.. automodule:: components.output_parsers + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/components/components.reasoning.rst b/docs/source/apis/components/components.reasoning.rst new file mode 100644 index 000000000..6c79f3cdf --- /dev/null +++ b/docs/source/apis/components/components.reasoning.rst @@ -0,0 +1,27 @@ +.. _components-reasoning: + +components.reasoning +============================ + +Submodules +---------- +.. autosummary:: + :toctree: _autosummary + + + components.reasoning.chain_of_thought + + + +.. toctree:: + :maxdepth: 4 + + components.reasoning.chain_of_thought + + +--------------- + +.. automodule:: components.reasoning + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/components/components.retriever.postgres_retriever.rst b/docs/source/apis/components/components.retriever.postgres_retriever.rst new file mode 100644 index 000000000..099bd303c --- /dev/null +++ b/docs/source/apis/components/components.retriever.postgres_retriever.rst @@ -0,0 +1,9 @@ +.. _components-retriever-postgres_retriever: + +components.retriever.postgres\_retriever +=============================================== + +.. automodule:: components.retriever.postgres_retriever + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/components/components.retriever.rst b/docs/source/apis/components/components.retriever.rst new file mode 100644 index 000000000..fdc4e1bd7 --- /dev/null +++ b/docs/source/apis/components/components.retriever.rst @@ -0,0 +1,39 @@ +.. _components-retriever: + +components.retriever +============================ + +Submodules +---------- +.. autosummary:: + :toctree: _autosummary + + + components.retriever.bm25_retriever + + components.retriever.faiss_retriever + + components.retriever.llm_retriever + + components.retriever.postgres_retriever + + components.retriever.reranker_retriever + + + +.. toctree:: + :maxdepth: 4 + + components.retriever.bm25_retriever + components.retriever.faiss_retriever + components.retriever.llm_retriever + components.retriever.postgres_retriever + components.retriever.reranker_retriever + + +--------------- + +.. 
automodule:: components.retriever + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.base_data_class.rst b/docs/source/apis/core/core.base_data_class.rst new file mode 100644 index 000000000..8e629861b --- /dev/null +++ b/docs/source/apis/core/core.base_data_class.rst @@ -0,0 +1,9 @@ +.. _core-base_data_class: + +core.base\_data\_class +============================= + +.. automodule:: core.base_data_class + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.component.rst b/docs/source/apis/core/core.component.rst new file mode 100644 index 000000000..aab446e21 --- /dev/null +++ b/docs/source/apis/core/core.component.rst @@ -0,0 +1,9 @@ +.. _core-component: + +core.component +===================== + +.. automodule:: core.component + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.data_components.rst b/docs/source/apis/core/core.data_components.rst new file mode 100644 index 000000000..f75d19912 --- /dev/null +++ b/docs/source/apis/core/core.data_components.rst @@ -0,0 +1,9 @@ +.. _core-data_components: + +core.data\_components +============================ + +.. automodule:: core.data_components + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.db.rst b/docs/source/apis/core/core.db.rst new file mode 100644 index 000000000..ca74e130b --- /dev/null +++ b/docs/source/apis/core/core.db.rst @@ -0,0 +1,9 @@ +.. _core-db: + +core.db +============== + +.. automodule:: core.db + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.default_prompt_template.rst b/docs/source/apis/core/core.default_prompt_template.rst new file mode 100644 index 000000000..e9f538faa --- /dev/null +++ b/docs/source/apis/core/core.default_prompt_template.rst @@ -0,0 +1,9 @@ +.. _core-default_prompt_template: + +core.default\_prompt\_template +===================================== + +.. automodule:: core.default_prompt_template + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.document_splitter.rst b/docs/source/apis/core/core.document_splitter.rst new file mode 100644 index 000000000..48c1607aa --- /dev/null +++ b/docs/source/apis/core/core.document_splitter.rst @@ -0,0 +1,9 @@ +.. _core-document_splitter: + +core.document\_splitter +============================== + +.. automodule:: core.document_splitter + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.embedder.rst b/docs/source/apis/core/core.embedder.rst new file mode 100644 index 000000000..195bb6c3a --- /dev/null +++ b/docs/source/apis/core/core.embedder.rst @@ -0,0 +1,9 @@ +.. _core-embedder: + +core.embedder +==================== + +.. automodule:: core.embedder + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.func_tool.rst b/docs/source/apis/core/core.func_tool.rst new file mode 100644 index 000000000..9e3c5e3e0 --- /dev/null +++ b/docs/source/apis/core/core.func_tool.rst @@ -0,0 +1,9 @@ +.. _core-func_tool: + +core.func\_tool +====================== + +.. automodule:: core.func_tool + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.functional.rst b/docs/source/apis/core/core.functional.rst new file mode 100644 index 000000000..222c411c9 --- /dev/null +++ b/docs/source/apis/core/core.functional.rst @@ -0,0 +1,9 @@ +.. _core-functional: + +core.functional +====================== + +.. 
automodule:: core.functional + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.generator.rst b/docs/source/apis/core/core.generator.rst new file mode 100644 index 000000000..df0c8e38d --- /dev/null +++ b/docs/source/apis/core/core.generator.rst @@ -0,0 +1,9 @@ +.. _core-generator: + +core.generator +===================== + +.. automodule:: core.generator + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.memory.rst b/docs/source/apis/core/core.memory.rst new file mode 100644 index 000000000..178062ab0 --- /dev/null +++ b/docs/source/apis/core/core.memory.rst @@ -0,0 +1,9 @@ +.. _core-memory: + +core.memory +================== + +.. automodule:: core.memory + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.model_client.rst b/docs/source/apis/core/core.model_client.rst new file mode 100644 index 000000000..d4fb3a6db --- /dev/null +++ b/docs/source/apis/core/core.model_client.rst @@ -0,0 +1,9 @@ +.. _core-model_client: + +core.model\_client +========================= + +.. automodule:: core.model_client + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.parameter.rst b/docs/source/apis/core/core.parameter.rst new file mode 100644 index 000000000..467c4b334 --- /dev/null +++ b/docs/source/apis/core/core.parameter.rst @@ -0,0 +1,9 @@ +.. _core-parameter: + +core.parameter +===================== + +.. automodule:: core.parameter + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.prompt_builder.rst b/docs/source/apis/core/core.prompt_builder.rst new file mode 100644 index 000000000..247ae1994 --- /dev/null +++ b/docs/source/apis/core/core.prompt_builder.rst @@ -0,0 +1,9 @@ +.. _core-prompt_builder: + +core.prompt\_builder +=========================== + +.. automodule:: core.prompt_builder + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.retriever.rst b/docs/source/apis/core/core.retriever.rst new file mode 100644 index 000000000..2182485ed --- /dev/null +++ b/docs/source/apis/core/core.retriever.rst @@ -0,0 +1,9 @@ +.. _core-retriever: + +core.retriever +===================== + +.. automodule:: core.retriever + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.string_parser.rst b/docs/source/apis/core/core.string_parser.rst new file mode 100644 index 000000000..e95276027 --- /dev/null +++ b/docs/source/apis/core/core.string_parser.rst @@ -0,0 +1,9 @@ +.. _core-string_parser: + +core.string\_parser +========================== + +.. automodule:: core.string_parser + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.tokenizer.rst b/docs/source/apis/core/core.tokenizer.rst new file mode 100644 index 000000000..de6634567 --- /dev/null +++ b/docs/source/apis/core/core.tokenizer.rst @@ -0,0 +1,9 @@ +.. _core-tokenizer: + +core.tokenizer +===================== + +.. automodule:: core.tokenizer + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.tool_manager.rst b/docs/source/apis/core/core.tool_manager.rst new file mode 100644 index 000000000..852c32659 --- /dev/null +++ b/docs/source/apis/core/core.tool_manager.rst @@ -0,0 +1,9 @@ +.. _core-tool_manager: + +core.tool\_manager +========================= + +.. 
automodule:: core.tool_manager + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/core/core.types.rst b/docs/source/apis/core/core.types.rst new file mode 100644 index 000000000..328324cbd --- /dev/null +++ b/docs/source/apis/core/core.types.rst @@ -0,0 +1,9 @@ +.. _core-types: + +core.types +================= + +.. automodule:: core.types + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/eval/eval.answer_match_acc.rst b/docs/source/apis/eval/eval.answer_match_acc.rst new file mode 100644 index 000000000..ec068ca7e --- /dev/null +++ b/docs/source/apis/eval/eval.answer_match_acc.rst @@ -0,0 +1,9 @@ +.. _eval-answer_match_acc: + +eval.answer\_match\_acc +============================== + +.. automodule:: eval.answer_match_acc + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/eval/eval.evaluators.rst b/docs/source/apis/eval/eval.evaluators.rst new file mode 100644 index 000000000..d6ccf71b0 --- /dev/null +++ b/docs/source/apis/eval/eval.evaluators.rst @@ -0,0 +1,9 @@ +.. _eval-evaluators: + +eval.evaluators +====================== + +.. automodule:: eval.evaluators + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/eval/eval.llm_as_judge.rst b/docs/source/apis/eval/eval.llm_as_judge.rst new file mode 100644 index 000000000..e9da70327 --- /dev/null +++ b/docs/source/apis/eval/eval.llm_as_judge.rst @@ -0,0 +1,9 @@ +.. _eval-llm_as_judge: + +eval.llm\_as\_judge +========================== + +.. automodule:: eval.llm_as_judge + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/eval/eval.retriever_recall.rst b/docs/source/apis/eval/eval.retriever_recall.rst new file mode 100644 index 000000000..1e1b7f4d3 --- /dev/null +++ b/docs/source/apis/eval/eval.retriever_recall.rst @@ -0,0 +1,9 @@ +.. _eval-retriever_recall: + +eval.retriever\_recall +============================= + +.. automodule:: eval.retriever_recall + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/eval/eval.retriever_relevance.rst b/docs/source/apis/eval/eval.retriever_relevance.rst new file mode 100644 index 000000000..737de569e --- /dev/null +++ b/docs/source/apis/eval/eval.retriever_relevance.rst @@ -0,0 +1,9 @@ +.. _eval-retriever_relevance: + +eval.retriever\_relevance +================================ + +.. automodule:: eval.retriever_relevance + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/optim/optim.few_shot_optimizer.rst b/docs/source/apis/optim/optim.few_shot_optimizer.rst new file mode 100644 index 000000000..352302ea7 --- /dev/null +++ b/docs/source/apis/optim/optim.few_shot_optimizer.rst @@ -0,0 +1,9 @@ +.. _optim-few_shot_optimizer: + +optim.few\_shot\_optimizer +================================= + +.. automodule:: optim.few_shot_optimizer + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/optim/optim.llm_augment.rst b/docs/source/apis/optim/optim.llm_augment.rst new file mode 100644 index 000000000..ce88ae79c --- /dev/null +++ b/docs/source/apis/optim/optim.llm_augment.rst @@ -0,0 +1,9 @@ +.. _optim-llm_augment: + +optim.llm\_augment +========================= + +.. 
automodule:: optim.llm_augment + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/optim/optim.llm_optimizer.rst b/docs/source/apis/optim/optim.llm_optimizer.rst new file mode 100644 index 000000000..981ca2281 --- /dev/null +++ b/docs/source/apis/optim/optim.llm_optimizer.rst @@ -0,0 +1,9 @@ +.. _optim-llm_optimizer: + +optim.llm\_optimizer +=========================== + +.. automodule:: optim.llm_optimizer + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/optim/optim.optimizer.rst b/docs/source/apis/optim/optim.optimizer.rst new file mode 100644 index 000000000..2a0e799a5 --- /dev/null +++ b/docs/source/apis/optim/optim.optimizer.rst @@ -0,0 +1,9 @@ +.. _optim-optimizer: + +optim.optimizer +====================== + +.. automodule:: optim.optimizer + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/optim/optim.sampler.rst b/docs/source/apis/optim/optim.sampler.rst new file mode 100644 index 000000000..207a4cb69 --- /dev/null +++ b/docs/source/apis/optim/optim.sampler.rst @@ -0,0 +1,9 @@ +.. _optim-sampler: + +optim.sampler +==================== + +.. automodule:: optim.sampler + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/tracing/tracing.decorators.rst b/docs/source/apis/tracing/tracing.decorators.rst new file mode 100644 index 000000000..688c17863 --- /dev/null +++ b/docs/source/apis/tracing/tracing.decorators.rst @@ -0,0 +1,9 @@ +.. _tracing-decorators: + +tracing.decorators +========================= + +.. automodule:: tracing.decorators + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/tracing/tracing.generator_call_logger.rst b/docs/source/apis/tracing/tracing.generator_call_logger.rst new file mode 100644 index 000000000..db70d6b6a --- /dev/null +++ b/docs/source/apis/tracing/tracing.generator_call_logger.rst @@ -0,0 +1,9 @@ +.. _tracing-generator_call_logger: + +tracing.generator\_call\_logger +====================================== + +.. automodule:: tracing.generator_call_logger + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/tracing/tracing.generator_state_logger.rst b/docs/source/apis/tracing/tracing.generator_state_logger.rst new file mode 100644 index 000000000..1b562a964 --- /dev/null +++ b/docs/source/apis/tracing/tracing.generator_state_logger.rst @@ -0,0 +1,9 @@ +.. _tracing-generator_state_logger: + +tracing.generator\_state\_logger +======================================= + +.. automodule:: tracing.generator_state_logger + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/utils/utils.config.rst b/docs/source/apis/utils/utils.config.rst new file mode 100644 index 000000000..a786dc7e7 --- /dev/null +++ b/docs/source/apis/utils/utils.config.rst @@ -0,0 +1,9 @@ +.. _utils-config: + +utils.config +=================== + +.. automodule:: utils.config + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/utils/utils.file_io.rst b/docs/source/apis/utils/utils.file_io.rst new file mode 100644 index 000000000..0b3ffb8be --- /dev/null +++ b/docs/source/apis/utils/utils.file_io.rst @@ -0,0 +1,9 @@ +.. _utils-file_io: + +utils.file\_io +===================== + +.. 
automodule:: utils.file_io + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/utils/utils.lazy_import.rst b/docs/source/apis/utils/utils.lazy_import.rst new file mode 100644 index 000000000..b76130ea1 --- /dev/null +++ b/docs/source/apis/utils/utils.lazy_import.rst @@ -0,0 +1,9 @@ +.. _utils-lazy_import: + +utils.lazy\_import +========================= + +.. automodule:: utils.lazy_import + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/utils/utils.logger.rst b/docs/source/apis/utils/utils.logger.rst new file mode 100644 index 000000000..4d9d9d427 --- /dev/null +++ b/docs/source/apis/utils/utils.logger.rst @@ -0,0 +1,9 @@ +.. _utils-logger: + +utils.logger +=================== + +.. automodule:: utils.logger + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/utils/utils.registry.rst b/docs/source/apis/utils/utils.registry.rst new file mode 100644 index 000000000..77a8095e9 --- /dev/null +++ b/docs/source/apis/utils/utils.registry.rst @@ -0,0 +1,9 @@ +.. _utils-registry: + +utils.registry +===================== + +.. automodule:: utils.registry + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/utils/utils.serialization.rst b/docs/source/apis/utils/utils.serialization.rst new file mode 100644 index 000000000..61980a68f --- /dev/null +++ b/docs/source/apis/utils/utils.serialization.rst @@ -0,0 +1,9 @@ +.. _utils-serialization: + +utils.serialization +========================== + +.. automodule:: utils.serialization + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/apis/utils/utils.setup_env.rst b/docs/source/apis/utils/utils.setup_env.rst new file mode 100644 index 000000000..dc669ad51 --- /dev/null +++ b/docs/source/apis/utils/utils.setup_env.rst @@ -0,0 +1,9 @@ +.. _utils-setup_env: + +utils.setup\_env +======================= + +.. automodule:: utils.setup_env + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/conf.py b/docs/source/conf.py index 59eb52ba5..77ae00ffb 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -15,7 +15,7 @@ sys.path.insert(0, os.path.abspath("../../")) -sys.path.insert(0, os.path.abspath("../../lightrag")) +sys.path.insert(0, os.path.abspath("../../lightrag/lightrag")) # # need to insert the paths # for dir in os.walk('../../lightrag'): # sys.path.insert(0, dir[0]) diff --git a/docs/source/developer_notes/component.rst b/docs/source/developer_notes/component.rst index 365e970d4..026d5a711 100644 --- a/docs/source/developer_notes/component.rst +++ b/docs/source/developer_notes/component.rst @@ -1,16 +1,17 @@ Component ============ -.. admonition:: Author - :class: highlight - `Li Yin `_ +.. .. admonition:: Author +.. :class: highlight + +.. `Li Yin `_ What you will learn? 1. What is ``Component`` and why is it designed this way? 2. How to use ``Component`` along with helper classes like ``FunComponent`` and ``Sequential``? -Component +Design --------------------------------------- :ref:`Component` is to LLM task pipelines what ``nn.Module`` is to PyTorch models. @@ -64,6 +65,25 @@ Here is the comparison of writing a PyTorch model and a LightRAG task component. def call(self, query: str) -> str: return self.doc(prompt_kwargs={"input_str": query}).data + +As the fundamental building block in an LLM task pipeline, the component is designed to serve five main purposes: + +
1. **Standardize the interface for all components.** This includes the `__init__` method, the `call` method for synchronous calls, the `acall` method for asynchronous calls, and the `__call__` method, which by default calls the `call` method. +2. **Provide a unified way to visualize the structure of the task pipeline** via the `__repr__` method. A subclass can additionally add an `_extra_repr` method to expose more information than the default `__repr__` method. +3. **Track and add all subcomponents and parameters automatically and recursively** to assist the building and optimizing process of the task pipeline. +4. **Manage the states and serialization**, with `state_dict` and `load_state_dict` methods in particular for parameters, and a `to_dict` method for serializing all the states falling into the component's attributes, from subcomponents to parameters to any other attributes of various data types. +5. **Make all components configurable using `json` or `yaml` files**. This is especially useful for experimenting with or building data processing pipelines. + +These features are key to keeping a LightRAG pipeline transparent, flexible, and easy to use. +By subclassing from the `Component` class, you will get most of these features out of the box.
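To make these purposes concrete, here is a minimal sketch of a custom component (the component itself is hypothetical; only the ``Component`` interface and the ``__repr__``/``to_dict`` behavior are taken from the list above):

.. code-block:: python

    from lightrag.core import Component


    class QueryRewriter(Component):  # hypothetical example component
        def __init__(self, suffix: str = " Please answer concisely."):
            super().__init__()
            self.suffix = suffix  # a plain attribute, captured during serialization

        def call(self, query: str) -> str:
            # synchronous entry point; `acall` would be its async counterpart
            return query + self.suffix


    rewriter = QueryRewriter()
    print(rewriter)             # unified structure visualization via __repr__
    states = rewriter.to_dict() # serializes subcomponents, parameters, and attributes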
+ + +Component in Action +--------------------------------------- + +.. Transparency +.. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + In this note, we are creating an AI doctor to answer medical questions. Run the ``DocQA`` on a query: @@ -78,22 +98,8 @@ The response is: As a doctor, the best treatment for a headache would depend on the underlying cause of the headache. Typically, over-the-counter pain relievers such as acetaminophen, ibuprofen, or aspirin can help to alleviate the pain. However, if the headache is severe or persistent, it is important to see a doctor for further evaluation and to determine the most appropriate treatment option. Other treatment options may include prescription medications, lifestyle modifications, stress management techniques, and relaxation techniques. -As the foundamental building block in LLM task pipeline, the component is designed to serve four main purposes: - -1. **Standarize the interface for all components.** This includes the `__init__` method, the `call` method for synchronous call, the `acall` method for asynchronous call, and the `__call__` which in default calls the `call` method. -2. **Provide a unified way to visualize the structure of the task pipeline** via `__repr__` method. And subclass can additional add `_extra_repr` method to add more information than the default `__repr__` method. -3. **Tracks, adds all subcomponents and parameters automatically and recursively** to assistant the building and optimizing process of the task pipeline. -4. **Manages the states and serialization**, with `state_dict` and `load_state_dict` methods in particular for parameters and `to_dict` method for serialization of all the states fall into the component's attributes, from subcomponents to parameters, to any other attributes of various data type. - - -Here are the benefits of using the Component class: - -- Transparency. - -- Flexibility. - -- Searialization and deserialization. - -.. Transparency -.. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Print the structure +~~~~~~~~~~~~~~~~~~~~~ We can easily visualize the structure via `print`: @@ -108,15 +114,16 @@ The printout: DocQA( - (doc): Generator( - model_kwargs={'model': 'gpt-3.5-turbo'}, model_type=ModelType.LLM - (prompt): Prompt(template: You are a doctor User: {{input_str}}, prompt_variables: ['input_str']) - (model_client): OpenAIClient() - ) + (doc): Generator( + model_kwargs={'model': 'gpt-3.5-turbo'}, model_type=ModelType.LLM + (prompt): Prompt(template: You are a doctor User: {{input_str}}, prompt_variables: ['input_str']) + (model_client): OpenAIClient() + ) ) - +Configure from file +~~~~~~~~~~~~~~~~~~~~~ @@ -149,7 +156,8 @@ You can easily save the detailed states: To add even more flexibility, we provide :class:`core.component.FunComponent` and :class:`core.component.Sequential` for more advanced use cases. -**Searalization and deserialization** +Serialization and deserialization +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We provide the ``is_pickable`` method to check if the component is pickable. For any of your components, it is good practice to ensure they are pickable. @@ -244,13 +252,13 @@ The structure of the sequence using ``print(seq)``: Sequential( (0): EnhanceQueryComponent() (1): DocQA( - (doc): Generator( - model_kwargs={'model': 'gpt-3.5-turbo'}, model_type=ModelType.LLM - (prompt): Prompt(template: You are a doctor User: {{input_str}}, prompt_variables: ['input_str']) - (model_client): OpenAIClient() + (doc): Generator( + model_kwargs={'model': 'gpt-3.5-turbo'}, model_type=ModelType.LLM + (prompt): Prompt(template: You are a doctor User: {{input_str}}, prompt_variables: ['input_str']) + (model_client): OpenAIClient() + ) ) ) - ) .. admonition:: API reference :class: highlight
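The ``Sequential`` structure printed above can be reproduced with a sketch along these lines (the body of ``EnhanceQueryComponent`` is hypothetical; the import paths follow the :class:`core.component.Sequential` reference, and we assume ``Sequential`` chains each component's ``call`` in order):

.. code-block:: python

    from lightrag.core.component import Component, Sequential


    class EnhanceQueryComponent(Component):
        def call(self, query: str) -> str:
            # hypothetical query-rewriting step
            return query + " Explain the reasoning briefly."


    seq = Sequential(EnhanceQueryComponent(), DocQA())
    answer = seq("What is the best treatment for a headache?")
    print(seq)  # prints the nested Sequential/DocQA structure shown above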
diff --git a/docs/source/developer_notes/generator.rst b/docs/source/developer_notes/generator.rst index ee7beaabc..1560d41f2 100644 --- a/docs/source/developer_notes/generator.rst +++ b/docs/source/developer_notes/generator.rst @@ -3,33 +3,51 @@ Generator ========= -.. admonition:: Author - :class: highlight +.. .. admonition:: Author +.. :class: highlight - `Li Yin `_ +.. `Li Yin `_ *The Center of it All* -Generator is the most essential functional component in LightRAG. -It is a user-facing orchestration component with a simple and unified interface for LLM prediction. -It orchestrates the following components along with their required arguments: +Generator is a user-facing orchestration component with a simple and unified interface for LLM prediction. + +Design +--------------------------------------- + +:class:`Generator` is designed to achieve the following goals: -- ``Prompt`` +- Model Agnostic: The Generator should be able to call any LLM model with the same prompt. +- Unified Interface: It should manage the pipeline of prompt (input) -> model call -> output parsing. +- Unified Output: This will make it easy to log and save records of all LLM predictions. +- Work with Optimizer: It should be able to work with the Optimizer to optimize the prompt. -- ``ModelClient`` +Therefore, it orchestrates the following components: -- Output processors to process the raw string response to desired format. +- ``Prompt``: takes in a ``template`` (string) and ``prompt_kwargs`` (dict) to format the prompt at initialization. When the ``template`` is not given, it defaults to ``DEFAULT_LIGHTRAG_SYSTEM_PROMPT``. -By switching out the model client, you can call any LLM model on your prompt, either API or local. + +- ``ModelClient``: takes in an already instantiated ``model_client`` and ``model_kwargs`` to call the model. Switching out the model client allows you to call any LLM model with the same prompt and output parsing. + +- ``output_processors``: a component, or components chained via ``Sequential``, to process the raw response into the desired format. If no output processor is provided, the output is decided by the model client, which often returns the raw string response (from the first response message). + +Generator supports both the ``call`` (``__call__``) and ``acall`` methods. Both take runtime ``prompt_kwargs`` (dict) and ``model_kwargs`` (dict), which give you full control over the prompt at call time and over the model arguments set on the initial model client. GeneratorOutput -^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^ Different from all other components, we cannot always enforce the LLM to output the right format. -We in particular created a :class:`core.types.GeneratorOutput` (a subclass of ``DataClass``) to store `data` (parsed response), `error` (error message if either the model inference SDKs fail or the output parsing fail) and `raw_response` (raw string response for reference) for any LLM predictions. +We in particular created a :class:`GeneratorOutput` (a subclass of ``DataClass``) to store `data` (the parsed response), `error` (the error message if either the model inference SDK or the output parsing fails), and `raw_response` (the raw string response for reference) for any LLM prediction. It is in developers' hands to process the output accordingly. -GeneratorInAction -^^^^^^^^^^^^^^^^^ +Generator In Action +--------------------------------------- + +**Initialization in code** + +**Initialization from dict config** + + Besides these examples, LLM is like water: even in our library, we have components that have adapted Generator to various other functionalities. - :class:`components.retriever.llm_retriever.LLMRetriever` is a retriever that uses Generator to call LLM to retrieve the most relevant documents. diff --git a/lightrag/components/model_client/openai_client.py b/lightrag/components/model_client/openai_client.py index 30593bfaf..e9d6e76f2 100644 --- a/lightrag/components/model_client/openai_client.py +++ b/lightrag/components/model_client/openai_client.py @@ -116,8 +116,6 @@ def init_async_client(self): raise ValueError("Environment variable OPENAI_API_KEY must be set") return AsyncOpenAI(api_key=api_key) - # save raw response - def parse_chat_completion(self, completion: Completion) -> Any: """Parse the completion to a str.""" log.debug(f"completion: {completion}") diff --git a/lightrag/components/reasoning/__init__.py b/lightrag/components/reasoning/__init__.py deleted file mode 100644 index 11d0f6690..000000000 --- a/lightrag/components/reasoning/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .chain_of_thought import * diff --git a/lightrag/components/reasoning/chain_of_thought.py b/lightrag/components/reasoning/chain_of_thought.py deleted file mode 100644 index 02579a1b0..000000000 --- a/lightrag/components/reasoning/chain_of_thought.py +++ /dev/null @@ -1,98 +0,0 @@ -""" -https://arxiv.org/abs/2201.11903, published in Jan, 2023 - -Chain of the thought(CoT) is to mimic a step-by-step thought process for arriving at the answer. You can achieve it in two ways: -1. Add instructions such as "Let's think step-by-step to answer this question". -2. Add few-shot examples such as -' -Q: Roger has 5 tennis balls. He buys 2 more cans of tennis balls. Each can has 3 tennis balls. How many tennis balls does he have now?
-A: Roger started with 5 balls. 2 cansof 3 tennis balls each is 6 tennis balls. 5 + 6 = 11. The answer is 11. -' - -NOTE: CoT can be helpful for more complicated task, it also varies from task to task and model to model. -For instance, CoT might already be supported in gpt3.5+ api calls. - -Benchmark it with and without CoT to see if it helps. -""" - -from typing import Dict, Optional - -from core.component import Component -from core.generator import Generator -from core.string_parser import JsonParser -from core.model_client import ModelClient -from core.default_prompt_template import DEFAULT_LIGHTRAG_SYSTEM_PROMPT - - -COT_TASK_DESC_STR_BASIC = ( - "You are a helpful assistant. Let's think step-by-step to answer user's query." -) -# Using triple quotes to include JSON-like structure more cleanly -COT_TASK_DESC_STR_WITH_JSON_OUTPUT = f""" -{COT_TASK_DESC_STR_BASIC} Output JSON format: {{"thought": "", "answer": ""}} -""" - - -# ChainOfThought will just be a generator with preset_prompt_kwargs of the task_desc_str = COT_TASK_DESC_STR -# additional you can ask it to generate a json with "thought" and "anwer" keys and use jsonParser - - -class CoTGenerator(Generator): - r""" - CoTGenerator is a subclass of Generator with default task_desc_str preset for Chain of Thought. - Output will be string. - It is exactly the same as using a Generator. - Example: - ``` - cot = CoTGenerator(model_client=model_client, model_kwargs={"model": model}) - ``` - """ - - def __init__( - self, - *, - model_client: ModelClient, - model_kwargs: Dict = {}, - template: Optional[str] = None, - preset_prompt_kwargs: Optional[Dict] = None, - output_processors: Optional[Component] = None, - ) -> None: - - super().__init__( - model_client=model_client, - model_kwargs=model_kwargs, - template=template or DEFAULT_LIGHTRAG_SYSTEM_PROMPT, - preset_prompt_kwargs=preset_prompt_kwargs - or {"task_desc_str": COT_TASK_DESC_STR_BASIC}, - output_processors=output_processors, - ) - - -class CoTGeneratorWithJsonOutput(Generator): - r""" - CoTGeneratorWithJsonOutput is a subclass of Generator with default task_desc_str preset for Chain of Thought. - Output will be parsed as JSON with "thought" and "answer" keys. 
- Example: - ``` - cot = CoTGeneratorWithJsonOutput(model_client=model_client, model_kwargs={"model": model}) - ``` - """ - - def __init__( - self, - *, - model_client: ModelClient, - model_kwargs: Dict = {}, - template: Optional[str] = None, - preset_prompt_kwargs: Optional[Dict] = None, - output_processors: Optional[Component] = None, - ) -> None: - - super().__init__( - model_client=model_client, - model_kwargs=model_kwargs, - template=template or DEFAULT_LIGHTRAG_SYSTEM_PROMPT, - preset_prompt_kwargs=preset_prompt_kwargs - or {"task_desc_str": COT_TASK_DESC_STR_WITH_JSON_OUTPUT}, - output_processors=output_processors or JsonParser(), - ) diff --git a/lightrag/core/types.py b/lightrag/core/types.py index 09f2d8d0e..8531b4126 100644 --- a/lightrag/core/types.py +++ b/lightrag/core/types.py @@ -204,10 +204,11 @@ class GeneratorOutput(DataClass, Generic[T_co]): usage: Optional[Usage] = field(default=None, metadata={"desc": "Usage tracking"}) raw_response: Optional[str] = field( default=None, metadata={"desc": "Raw string response from the model"} - ) + ) # parsed from model client response -GeneratorOutputType = GeneratorOutput[Any] +GeneratorInputType = str +GeneratorOutputType = GeneratorOutput[object] ####################################################################################### # Data modeling for Retriever component diff --git a/lightrag/lightrag/__init__.py b/lightrag/lightrag/__init__.py new file mode 100644 index 000000000..d33bab7c7 --- /dev/null +++ b/lightrag/lightrag/__init__.py @@ -0,0 +1,3 @@ +from lightrag.utils import setup_env + +setup_env() diff --git a/lightrag/components/__init__.py b/lightrag/lightrag/components/__init__.py similarity index 100% rename from lightrag/components/__init__.py rename to lightrag/lightrag/components/__init__.py diff --git a/lightrag/components/agent/README.md b/lightrag/lightrag/components/agent/README.md similarity index 100% rename from lightrag/components/agent/README.md rename to lightrag/lightrag/components/agent/README.md diff --git a/lightrag/components/agent/__init__.py b/lightrag/lightrag/components/agent/__init__.py similarity index 100% rename from lightrag/components/agent/__init__.py rename to lightrag/lightrag/components/agent/__init__.py diff --git a/lightrag/components/agent/react.py b/lightrag/lightrag/components/agent/react.py similarity index 100% rename from lightrag/components/agent/react.py rename to lightrag/lightrag/components/agent/react.py diff --git a/lightrag/components/data_process/__init__.py b/lightrag/lightrag/components/data_process/__init__.py similarity index 100% rename from lightrag/components/data_process/__init__.py rename to lightrag/lightrag/components/data_process/__init__.py diff --git a/lightrag/components/data_process/data_components.py b/lightrag/lightrag/components/data_process/data_components.py similarity index 100% rename from lightrag/components/data_process/data_components.py rename to lightrag/lightrag/components/data_process/data_components.py diff --git a/lightrag/components/data_process/text_splitter.py b/lightrag/lightrag/components/data_process/text_splitter.py similarity index 79% rename from lightrag/components/data_process/text_splitter.py rename to lightrag/lightrag/components/data_process/text_splitter.py index 2f632c1bd..6d19b1db1 100644 --- a/lightrag/components/data_process/text_splitter.py +++ b/lightrag/lightrag/components/data_process/text_splitter.py @@ -32,15 +32,22 @@ DocumentSplitterOutputType = List[Document] # customizable seperators map -SEPARATORS = 
{"page": "\f", "passage": "\n\n", "word": " ", "sentence": ".", "token": ""} +SEPARATORS = { + "page": "\f", + "passage": "\n\n", + "word": " ", + "sentence": ".", + "token": "", +} DEFAULT_CHUNK_SIZE = 800 DEFAULT_CHUNK_OVERLAP = 200 tokenizer = Tokenizer() + class TextSplitter(Component): - """ + """ Text Splitter for Chunking Documents ``TextSplitter`` first utilizes ``split_by`` to specify the text-splitting criterion and breaks the long text into smaller texts. @@ -49,11 +56,11 @@ class TextSplitter(Component): **Splitting Types** - ``TextSplitter`` supports 2 types of splitting. - + ``TextSplitter`` supports 2 types of splitting. + * **Type 1:** Specify the exact text splitting point such as space<" "> and periods<".">. It is intuitive, for example, split_by "word": - :: + :: "Hello, world!" -> ["Hello, " ,"world!"] @@ -67,7 +74,7 @@ class TextSplitter(Component): Tokenizer reflects the real token numbers the models take in and helps the developers control budgets. **Definitions** - + * **split_by** specifies the split rule, i.e. the smallest unit during splitting. We support ``"word"``, ``"sentence"``, ``"page"``, ``"passage"``, and ``"token"``. The splitter utilizes the corresponding separator from the ``SEPARATORS`` dictionary. For Type 1 splitting, we apply ``Python str.split()`` to break the text. @@ -76,16 +83,16 @@ class TextSplitter(Component): .. note:: For option ``token``, its separator is "" because we directly split by a tokenizer, instead of text point. - * **chunk_size** is the the maximum number of units in each chunk. + * **chunk_size** is the the maximum number of units in each chunk. * **chunk_overlap** is the number of units that each chunk should overlap. Including context at the borders prevents sudden meaning shift in text between sentences/context, especially in sentiment analysis. - + * **Splitting Details** - Type 1: - The ``TextSplitter`` utilizes Python's ``str.split(separator)`` method. - Developers can refer to - + Type 1: + The ``TextSplitter`` utilizes Python's ``str.split(separator)`` method. + Developers can refer to + .. code-block:: none { @@ -95,30 +102,30 @@ class TextSplitter(Component): "sentence": "." } for exact points of text division. - + .. note:: Developers need to determine how to assign text to each data chunk for the embedding and retrieval tasks. Type 2: We implement a tokenizer using ``cl100k_base`` encoding that aligns with how models see text in the form of tokens. E.g. "tiktoken is great!" -> ["t", "ik", "token", " is", " great", "!"] This helps developers control the token usage and budget better. - + * **Merge Details** Type 1/Type 2 create a list of split texts. ``TextSplitter`` then reattaches the specified separator to each piece of the split text, except for the last segment. This approach maintains the original spacing and punctuation, which is critical in contexts like natural language processing where text formatting can impact interpretations and outcomes. E.g. "hello world!" split by "word" will be kept as "hello " and "world!" - + * **Customization** - You can also customize the ``SEPARATORS``. For example, by defining ``SEPARATORS`` = {"question": "?"} and setting ``split_by`` = "question", the document will be split at each ``?``, ideal for processing text structured + You can also customize the ``SEPARATORS``. For example, by defining ``SEPARATORS`` = {"question": "?"} and setting ``split_by`` = "question", the document will be split at each ``?``, ideal for processing text structured as a series of questions. 
If you need to customize :class:`tokenizer `, please check `Reference `_. - + * **Integration with Other Document Types** This functionality is ideal for segmenting texts into sentences, words, pages, or passages, which can then be processed further for NLP applications. For **PDFs**, developers will need to extract the text before using the splitter. Libraries like ``PyPDF2`` or ``PDFMiner`` can be utilized for this purpose. ``LightRAG``'s future implementations will introduce splitters for ``JSON``, ``HTML``, ``markdown``, and ``code``. - + Example: - + .. code-block:: python from lightrag.components.data_process.text_splitter import TextSplitter @@ -148,19 +155,20 @@ class TextSplitter(Component): # Document(id=ca0af45b-4f88-49b5-97db-163da9868ea4, text='text. Even more text to ', meta_data=None, vector=[], parent_doc_id=doc1, order=1, score=None) # Document(id=e7b617b2-3927-4248-afce-ec0fc247ac8b, text='to illustrate.', meta_data=None, vector=[], parent_doc_id=doc1, order=2, score=None) """ + def __init__( self, split_by: Literal["word", "sentence", "page", "passage", "token"] = "word", chunk_size: int = DEFAULT_CHUNK_SIZE, chunk_overlap: int = DEFAULT_CHUNK_OVERLAP, - batch_size: int = 1000 + batch_size: int = 1000, ): """ Initializes the TextSplitter with the specified parameters for text splitting. Args: - split_by (str): The specific criterion to use for splitting the text. - Valid options are 'word' to split by ' ', 'sentence' to split by '.', + split_by (str): The specific criterion to use for splitting the text. + Valid options are 'word' to split by ' ', 'sentence' to split by '.', 'page' to split by '\\f', 'passage' to split by '\\n\\n'. chunk_size (int): The size of chunks to generate after splitting. Must be greater than 0. chunk_overlap (int): The number of characters of overlap between chunks. Must be non-negative @@ -173,24 +181,34 @@ def __init__( super().__init__() self.split_by = split_by - assert split_by in SEPARATORS, f"Invalid options for split_by. You must select from {list(SEPARATORS.keys())}." - - assert chunk_overlap < chunk_size, f"chunk_overlap can't be larger than or equal to chunk_size. Received chunk_size: {chunk_size}, chunk_overlap: {chunk_overlap}" - - assert chunk_size > 0, f"chunk_size must be greater than 0. Received value: {chunk_size}" + assert ( + split_by in SEPARATORS + ), f"Invalid options for split_by. You must select from {list(SEPARATORS.keys())}." + + assert ( + chunk_overlap < chunk_size + ), f"chunk_overlap can't be larger than or equal to chunk_size. Received chunk_size: {chunk_size}, chunk_overlap: {chunk_overlap}" + + assert ( + chunk_size > 0 + ), f"chunk_size must be greater than 0. Received value: {chunk_size}" self.chunk_size = chunk_size - assert chunk_overlap >= 0, f"chunk_overlap must be non-negative. Received value: {chunk_overlap}" + assert ( + chunk_overlap >= 0 + ), f"chunk_overlap must be non-negative. Received value: {chunk_overlap}" self.chunk_overlap = chunk_overlap self.batch_size = batch_size - - log.info(f"Initialized TextSplitter with split_by={self.split_by}, chunk_size={self.chunk_size}, chunk_overlap={self.chunk_overlap}, batch_size={self.batch_size}") + + log.info( + f"Initialized TextSplitter with split_by={self.split_by}, chunk_size={self.chunk_size}, chunk_overlap={self.chunk_overlap}, batch_size={self.batch_size}" + ) def split_text(self, text: str) -> List[str]: """ Splits the provided text into chunks. - + Splits based on the specified split_by, chunk size, and chunk overlap settings. 
Args: @@ -199,24 +217,28 @@ def split_text(self, text: str) -> List[str]: Returns: List[str]: A list of text chunks. """ - log.info(f"Splitting text with split_by: {self.split_by}, chunk_size: {self.chunk_size}, chunk_overlap: {self.chunk_overlap}") + log.info( + f"Splitting text with split_by: {self.split_by}, chunk_size: {self.chunk_size}, chunk_overlap: {self.chunk_overlap}" + ) separator = SEPARATORS[self.split_by] splits = self._split_text_into_units(text, separator) log.info(f"Text split into {len(splits)} parts.") - chunks = self._merge_units_to_chunks(splits, self.chunk_size, self.chunk_overlap, separator) + chunks = self._merge_units_to_chunks( + splits, self.chunk_size, self.chunk_overlap, separator + ) log.info(f"Text merged into {len(chunks)} chunks.") return chunks def call(self, documents: DocumentSplitterInputType) -> DocumentSplitterOutputType: """ Process the splitting task on a list of documents in batch. - + Batch processes a list of documents, splitting each document's text according to the configured split_by, chunk size, and chunk overlap. Args: documents (List[Document]): A list of Document objects to process. - + Returns: List[Document]: A list of new Document objects, each containing a chunk of text from the original documents. @@ -224,20 +246,29 @@ def call(self, documents: DocumentSplitterInputType) -> DocumentSplitterOutputTy TypeError: If 'documents' is not a list or contains non-Document objects. ValueError: If any document's text is None. """ - - if not isinstance(documents, list) or any(not isinstance(doc, Document) for doc in documents): + + if not isinstance(documents, list) or any( + not isinstance(doc, Document) for doc in documents + ): log.error("Input should be a list of Documents.") raise TypeError("Input should be a list of Documents.") - + split_docs = [] # Using range and batch_size to create batches - for start_idx in tqdm(range(0, len(documents), self.batch_size), desc="Splitting Documents in Batches"): - batch_docs = documents[start_idx:start_idx + self.batch_size] - + for start_idx in tqdm( + range(0, len(documents), self.batch_size), + desc="Splitting Documents in Batches", + ): + batch_docs = documents[start_idx : start_idx + self.batch_size] + for doc in batch_docs: if not isinstance(doc, Document): - log.error(f"Each item in documents should be an instance of Document, but got {type(doc).__name__}.") - raise TypeError(f"Each item in documents should be an instance of Document, but got {type(doc).__name__}.") + log.error( + f"Each item in documents should be an instance of Document, but got {type(doc).__name__}." + ) + raise TypeError( + f"Each item in documents should be an instance of Document, but got {type(doc).__name__}." + ) if doc.text is None: log.error(f"Text should not be None. Doc id: {doc.id}") @@ -246,21 +277,24 @@ def call(self, documents: DocumentSplitterInputType) -> DocumentSplitterOutputTy text_splits = self.split_text(doc.text) meta_data = deepcopy(doc.meta_data) - split_docs.extend([ - Document( - text=txt, - meta_data=meta_data, - parent_doc_id=f"{doc.id}", - order=i, - vector=[], - ) - for i, txt in enumerate(text_splits) - ]) - log.info(f"Processed {len(documents)} documents into {len(split_docs)} split documents.") + split_docs.extend( + [ + Document( + text=txt, + meta_data=meta_data, + parent_doc_id=f"{doc.id}", + order=i, + vector=[], + ) + for i, txt in enumerate(text_splits) + ] + ) + log.info( + f"Processed {len(documents)} documents into {len(split_docs)} split documents." 
+ ) return split_docs - - def _split_text_into_units( - self, text: str, separator: str) -> List[str]: + + def _split_text_into_units(self, text: str, separator: str) -> List[str]: """Split text based on the specified separator.""" if self.split_by == "token": splits = tokenizer.encode(text) @@ -268,7 +302,7 @@ def _split_text_into_units( splits = text.split(separator) log.info(f"Text split by '{separator}' into {len(splits)} parts.") return splits - + def _merge_units_to_chunks( self, splits: List[str], chunk_size: int, chunk_overlap: int, separator: str ) -> List[str]: @@ -276,42 +310,44 @@ def _merge_units_to_chunks( Merge split text chunks based on the specified chunk size and overlap. """ chunks = [] - # we use a window to get the text for each trunk, the window size is chunk_size, step is chunk_size - chunk_overlap + # we use a window to get the text for each trunk, the window size is chunk_size, step is chunk_size - chunk_overlap step = chunk_size - chunk_overlap idx = 0 - + for idx in range(0, len(splits), step): # 1. if the window exceeds the list of splitted string, break and process the last chunk # 2. if the window ends exactly the same with the splits, then break and treat the splits[idx:len(splits)] as the last chunk - if idx+chunk_size >= len(splits): + if idx + chunk_size >= len(splits): break - current_splits = splits[idx:idx+chunk_size] + current_splits = splits[idx : idx + chunk_size] # add the separator between each unit and merge the string # this won't be the last chunk, so we need to add the separator at the end if self.split_by == "token": - chunk = current_splits # if token, then keep the original form + chunk = current_splits # if token, then keep the original form else: chunk = separator.join(current_splits) + separator chunks.append(chunk) - + if idx < len(splits): if self.split_by == "token": last_chunk = splits[idx:] # if token, then keep the original form else: - last_chunk = separator.join(splits[idx:]) # if not token, then join into string + last_chunk = separator.join( + splits[idx:] + ) # if not token, then join into string if len(last_chunk) > 0: chunks.append(last_chunk) - - if self.split_by=="token": + + if self.split_by == "token": # decode each chunk here chunks = [tokenizer.decode(chunk) for chunk in chunks] - + log.info(f"Merged into {len(chunks)} chunks.") return chunks - + def _extra_repr(self) -> str: s = f"split_by={self.split_by}, chunk_size={self.chunk_size}, chunk_overlap={self.chunk_overlap}" return s - - -# test the execution llamaindex and langchain \ No newline at end of file + + +# test the execution llamaindex and langchain diff --git a/lightrag/components/memory/__init__.py b/lightrag/lightrag/components/memory/__init__.py similarity index 100% rename from lightrag/components/memory/__init__.py rename to lightrag/lightrag/components/memory/__init__.py diff --git a/lightrag/components/memory/memory.py b/lightrag/lightrag/components/memory/memory.py similarity index 100% rename from lightrag/components/memory/memory.py rename to lightrag/lightrag/components/memory/memory.py diff --git a/lightrag/components/model_client/__init__.py b/lightrag/lightrag/components/model_client/__init__.py similarity index 100% rename from lightrag/components/model_client/__init__.py rename to lightrag/lightrag/components/model_client/__init__.py diff --git a/lightrag/components/model_client/anthropic_client.py b/lightrag/lightrag/components/model_client/anthropic_client.py similarity index 100% rename from 
lightrag/components/model_client/anthropic_client.py rename to lightrag/lightrag/components/model_client/anthropic_client.py diff --git a/lightrag/components/model_client/cohere_client.py b/lightrag/lightrag/components/model_client/cohere_client.py similarity index 100% rename from lightrag/components/model_client/cohere_client.py rename to lightrag/lightrag/components/model_client/cohere_client.py diff --git a/lightrag/components/model_client/google_client.py b/lightrag/lightrag/components/model_client/google_client.py similarity index 98% rename from lightrag/components/model_client/google_client.py rename to lightrag/lightrag/components/model_client/google_client.py index 2c53fba93..ccc890654 100644 --- a/lightrag/components/model_client/google_client.py +++ b/lightrag/lightrag/components/model_client/google_client.py @@ -11,6 +11,7 @@ BadRequest, GoogleAPICallError, ) +from google.generativeai.types import GenerateContentResponse from lightrag.core.model_client import ModelClient diff --git a/lightrag/components/model_client/groq_client.py b/lightrag/lightrag/components/model_client/groq_client.py similarity index 100% rename from lightrag/components/model_client/groq_client.py rename to lightrag/lightrag/components/model_client/groq_client.py diff --git a/lightrag/lightrag/components/model_client/openai_client.py b/lightrag/lightrag/components/model_client/openai_client.py new file mode 100644 index 000000000..e9d6e76f2 --- /dev/null +++ b/lightrag/lightrag/components/model_client/openai_client.py @@ -0,0 +1,233 @@ +"""OpenAI ModelClient integration.""" + +import os +from typing import Dict, Sequence, Optional, List, Any, TypeVar, Callable + +import logging + +from openai import OpenAI, AsyncOpenAI +from openai import ( + APITimeoutError, + InternalServerError, + RateLimitError, + UnprocessableEntityError, + BadRequestError, +) + +from lightrag.utils.lazy_import import safe_import, OptionalPackages + + +from lightrag.core.model_client import ModelClient +from lightrag.core.types import ModelType, EmbedderOutput, TokenLogProb +from lightrag.components.model_client.utils import parse_embedding_response + + +safe_import(OptionalPackages.OPENAI.value[0], OptionalPackages.OPENAI.value[1]) + + +from openai.types import Completion, CreateEmbeddingResponse + + +import backoff + +log = logging.getLogger(__name__) +T = TypeVar("T") + + +# completion parsing functions; you can combine them into one single chat completion parser +def get_first_message_content(completion: Completion) -> str: + r"""When we only need the content of the first message. + It is the default parser for chat completion.""" + return completion.choices[0].message.content + + +def get_all_messages_content(completion: Completion) -> List[str]: + r"""When n > 1, get all the messages' content.""" + return [c.message.content for c in completion.choices] + + +def get_probabilities(completion: Completion) -> List[List[TokenLogProb]]: + r"""Get the probabilities of each token in the completion.""" + log_probs = [] + for c in completion.choices: + content = c.logprobs.content + log_probs_for_choice = [] + for openai_token_logprob in content: + token = openai_token_logprob.token + logprob = openai_token_logprob.logprob + log_probs_for_choice.append(TokenLogProb(token=token, logprob=logprob)) + log_probs.append(log_probs_for_choice) + return log_probs + + +class OpenAIClient(ModelClient): + __doc__ = r"""A component wrapper for the OpenAI API client. + + Supports both the embedding and chat completion APIs. + + Users can (1) simply use the ``Embedder`` and ``Generator`` components by passing ``OpenAIClient()`` as the model_client, + or (2) use this class as an example to create their own API client, or extend it (copying and modifying the code) in their own project. + + Note: + We suggest that users do not use `response_format` to enforce the output data type, or `tools` and `tool_choice` in model_kwargs when calling the API. + We do not know how OpenAI does the formatting or what prompt they have added. + Instead + - use :ref:`OutputParser` for response parsing and formatting. + + Args: + api_key (Optional[str], optional): OpenAI API key. Defaults to None. + chat_completion_parser (Callable[[Completion], Any], optional): A function to parse the chat completion to a str. Defaults to None; + the default parser is `get_first_message_content`. + + References: + - Embeddings models: https://platform.openai.com/docs/guides/embeddings + - Chat models: https://platform.openai.com/docs/guides/text-generation + - OpenAI docs: https://platform.openai.com/docs/introduction + """ + + def __init__( + self, + api_key: Optional[str] = None, + chat_completion_parser: Callable[[Completion], Any] = None, + ): + r"""It is recommended to set the OPENAI_API_KEY environment variable instead of passing it as an argument. + + Args: + api_key (Optional[str], optional): OpenAI API key. Defaults to None. + """ + super().__init__() + self._api_key = api_key + self.sync_client = self.init_sync_client() + self.async_client = None # only initialize if the async call is called + self.chat_completion_parser = ( + chat_completion_parser or get_first_message_content + ) + + def init_sync_client(self): + api_key = self._api_key or os.getenv("OPENAI_API_KEY") + if not api_key: + raise ValueError("Environment variable OPENAI_API_KEY must be set") + return OpenAI(api_key=api_key) + + def init_async_client(self): + api_key = self._api_key or os.getenv("OPENAI_API_KEY") + if not api_key: + raise ValueError("Environment variable OPENAI_API_KEY must be set") + return AsyncOpenAI(api_key=api_key) + + def parse_chat_completion(self, completion: Completion) -> Any: + """Parse the completion to a str.""" + log.debug(f"completion: {completion}") + return self.chat_completion_parser(completion) + # return completion.choices[0].message.content + + def parse_embedding_response( + self, response: CreateEmbeddingResponse + ) -> EmbedderOutput: + r"""Parse the embedding response to a structure LightRAG components can understand. + + Should be called in ``Embedder``. + """ + try: + return parse_embedding_response(response) + except Exception as e: + log.error(f"Error parsing the embedding response: {e}") + return EmbedderOutput(data=[], error=str(e), raw_response=response) + + def convert_inputs_to_api_kwargs( + self, + input: Optional[Any] = None, + model_kwargs: Dict = {}, + model_type: ModelType = ModelType.UNDEFINED, + ) -> Dict: + r""" + Specify the API input type and output api_kwargs that will be used in _call and _acall methods.
+ Convert the Component's standard input, and system_input(chat model) and model_kwargs into API-specific format + """ + final_model_kwargs = model_kwargs.copy() + if model_type == ModelType.EMBEDDER: + if isinstance(input, str): + input = [input] + # convert input to input + assert isinstance(input, Sequence), "input must be a sequence of text" + final_model_kwargs["input"] = input + elif model_type == ModelType.LLM: + # convert input to messages + messages: List[Dict[str, str]] = [] + if input is not None and input != "": + messages.append({"role": "system", "content": input}) + assert isinstance( + messages, Sequence + ), "input must be a sequence of messages" + final_model_kwargs["messages"] = messages + else: + raise ValueError(f"model_type {model_type} is not supported") + return final_model_kwargs + + @backoff.on_exception( + backoff.expo, + ( + APITimeoutError, + InternalServerError, + RateLimitError, + UnprocessableEntityError, + BadRequestError, + ), + max_time=5, + ) + def call(self, api_kwargs: Dict = {}, model_type: ModelType = ModelType.UNDEFINED): + """ + kwargs is the combined input and model_kwargs + """ + log.info(f"api_kwargs: {api_kwargs}") + if model_type == ModelType.EMBEDDER: + return self.sync_client.embeddings.create(**api_kwargs) + elif model_type == ModelType.LLM: + return self.sync_client.chat.completions.create(**api_kwargs) + else: + raise ValueError(f"model_type {model_type} is not supported") + + @backoff.on_exception( + backoff.expo, + ( + APITimeoutError, + InternalServerError, + RateLimitError, + UnprocessableEntityError, + BadRequestError, + ), + max_time=5, + ) + async def acall( + self, api_kwargs: Dict = {}, model_type: ModelType = ModelType.UNDEFINED + ): + """ + kwargs is the combined input and model_kwargs + """ + if self.async_client is None: + self.async_client = self.init_async_client() + if model_type == ModelType.EMBEDDER: + return await self.async_client.embeddings.create(**api_kwargs) + elif model_type == ModelType.LLM: + return await self.async_client.chat.completions.create(**api_kwargs) + else: + raise ValueError(f"model_type {model_type} is not supported") + + @classmethod + def from_dict(cls: type[T], data: Dict[str, Any]) -> T: + obj = super().from_dict(data) + # recreate the existing clients + obj.sync_client = obj.init_sync_client() + obj.async_client = obj.init_async_client() + return obj + + def to_dict(self) -> Dict[str, Any]: + r"""Convert the component to a dictionary.""" + # TODO: not exclude but save yes or no for recreating the clients + exclude = [ + "sync_client", + "async_client", + ] # unserializable object + output = super().to_dict(exclude=exclude) + return output diff --git a/lightrag/components/model_client/transformers_client.py b/lightrag/lightrag/components/model_client/transformers_client.py similarity index 89% rename from lightrag/components/model_client/transformers_client.py rename to lightrag/lightrag/components/model_client/transformers_client.py index a40e651e6..1d9f72d22 100644 --- a/lightrag/components/model_client/transformers_client.py +++ b/lightrag/lightrag/components/model_client/transformers_client.py @@ -13,7 +13,7 @@ AutoTokenizer, AutoModel, AutoModelForSequenceClassification, - AutoModelForCausalLM + AutoModelForCausalLM, ) from lightrag.core.model_client import ModelClient @@ -223,6 +223,7 @@ def __call__(self, **kwargs): else: raise ValueError(f"model {model_name} is not supported") + class TransformerLLM: models: Dict[str, type] = {} @@ -231,56 +232,84 @@ def __init__(self, model_name: 
Optional[str] = "HuggingFaceH4/zephyr-7b-beta"): if model_name is not None: self.init_model(model_name=model_name) - + def init_model(self, model_name: str): try: self.tokenizer = AutoTokenizer.from_pretrained(model_name) self.model = AutoModelForCausalLM.from_pretrained(model_name) # register the model self.models[model_name] = self.model - self.device = 'cuda' if torch.cuda.is_available() else 'cpu' + self.device = "cuda" if torch.cuda.is_available() else "cpu" log.info(f"Done loading model {model_name}") # Set pad token if it's not already set if self.tokenizer.pad_token is None: self.tokenizer.pad_token = self.tokenizer.eos_token # common fallback - self.model.config.pad_token_id = self.tokenizer.eos_token_id # ensure consistency in the model config + self.model.config.pad_token_id = ( + self.tokenizer.eos_token_id + ) # ensure consistency in the model config except Exception as e: log.error(f"Error loading model {model_name}: {e}") raise e - + def parse_chat_completion(self, input_text: str, response: str): - parsed_response = response.replace(input_text, "").strip() # Safely handle cases where input_text might not be in response - + parsed_response = response.replace( + input_text, "" + ).strip() # Safely handle cases where input_text might not be in response + return parsed_response if parsed_response else response - - def call(self, input_text: str, skip_special_tokens: bool = True, clean_up_tokenization_spaces: bool = False, max_length: int = 150): + + def call( + self, + input_text: str, + skip_special_tokens: bool = True, + clean_up_tokenization_spaces: bool = False, + max_length: int = 150, + ): if not self.model: log.error("Model is not initialized.") raise ValueError("Model is not initialized.") - + # Ensure tokenizer has pad token; set it if not if self.tokenizer.pad_token is None: self.tokenizer.pad_token = self.tokenizer.eos_token - self.model.config.pad_token_id = self.tokenizer.eos_token_id # Sync model config pad token id + self.model.config.pad_token_id = ( + self.tokenizer.eos_token_id + ) # Sync model config pad token id # Process inputs with attention mask and padding - inputs = self.tokenizer(input_text, return_tensors="pt", padding=True).to(self.device) + inputs = self.tokenizer(input_text, return_tensors="pt", padding=True).to( + self.device + ) # inputs = self.tokenizer(input_text, return_tensors="pt", padding="longest", truncation=True).to(self.device) with torch.no_grad(): # Ensures no gradients are calculated to save memory and computations generate_ids = self.model.generate( - inputs['input_ids'], - attention_mask=inputs['attention_mask'], - max_length=max_length # Control the output length more precisely + inputs["input_ids"], + attention_mask=inputs["attention_mask"], + max_length=max_length, # Control the output length more precisely + ) + response = self.tokenizer.decode( + generate_ids[0], + skip_special_tokens=skip_special_tokens, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) - response = self.tokenizer.decode(generate_ids[0], skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces) parsed_response = self.parse_chat_completion(input_text, response) return parsed_response - def __call__(self, input_text: str, skip_special_tokens: bool = True, clean_up_tokenization_spaces: bool = False, max_length: int = 150): - return self.call(input_text, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, max_length=max_length) - - + def __call__( + self, + 
input_text: str, + skip_special_tokens: bool = True, + clean_up_tokenization_spaces: bool = False, + max_length: int = 150, + ): + return self.call( + input_text, + skip_special_tokens=skip_special_tokens, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + max_length=max_length, + ) + # def call(self, input_text: str, skip_special_tokens: bool = True, clean_up_tokenization_spaces: bool = False): # if not self.model: # log.error("Model is not initialized.") @@ -293,8 +322,8 @@ def __call__(self, input_text: str, skip_special_tokens: bool = True, clean_up_t # def __call__(self, input_text: str, skip_special_tokens: bool = True, clean_up_tokenization_spaces: bool = False): # return self.call(input_text, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces) - - + + class TransformersClient(ModelClient): __doc__ = r"""LightRAG API client for transformers. @@ -308,9 +337,7 @@ class TransformersClient(ModelClient): "BAAI/bge-reranker-base": { "type": ModelType.RERANKER, }, - "HuggingFaceH4/zephyr-7b-beta": { - "type": ModelType.LLM - } + "HuggingFaceH4/zephyr-7b-beta": {"type": ModelType.LLM}, } def __init__(self, model_name: Optional[str] = None) -> None: @@ -333,7 +360,7 @@ def init_sync_client(self): def init_reranker_client(self): return TransformerReranker() - + def init_llm_client(self): return TransformerLLM() @@ -369,7 +396,7 @@ def call(self, api_kwargs: Dict = {}, model_type: ModelType = ModelType.UNDEFINE scores, api_kwargs["top_k"] ) return top_k_indices, top_k_scores - elif ( # LLM + elif ( # LLM model_type == ModelType.LLM and "model" in api_kwargs and api_kwargs["model"] == "HuggingFaceH4/zephyr-7b-beta" @@ -400,4 +427,4 @@ def convert_inputs_to_api_kwargs( final_model_kwargs["input"] = input return final_model_kwargs else: - raise ValueError(f"model_type {model_type} is not supported") \ No newline at end of file + raise ValueError(f"model_type {model_type} is not supported") diff --git a/lightrag/components/model_client/utils.py b/lightrag/lightrag/components/model_client/utils.py similarity index 100% rename from lightrag/components/model_client/utils.py rename to lightrag/lightrag/components/model_client/utils.py diff --git a/lightrag/components/output_parsers/__init__.py b/lightrag/lightrag/components/output_parsers/__init__.py similarity index 100% rename from lightrag/components/output_parsers/__init__.py rename to lightrag/lightrag/components/output_parsers/__init__.py diff --git a/lightrag/components/output_parsers/outputs.py b/lightrag/lightrag/components/output_parsers/outputs.py similarity index 100% rename from lightrag/components/output_parsers/outputs.py rename to lightrag/lightrag/components/output_parsers/outputs.py diff --git a/lightrag/lightrag/components/reasoning/__init__.py b/lightrag/lightrag/components/reasoning/__init__.py new file mode 100644 index 000000000..f9340a778 --- /dev/null +++ b/lightrag/lightrag/components/reasoning/__init__.py @@ -0,0 +1 @@ +from .chain_of_thought import * # noqa: F401, F403 diff --git a/lightrag/lightrag/components/reasoning/chain_of_thought.py b/lightrag/lightrag/components/reasoning/chain_of_thought.py new file mode 100644 index 000000000..699975e21 --- /dev/null +++ b/lightrag/lightrag/components/reasoning/chain_of_thought.py @@ -0,0 +1,96 @@ +""" +https://arxiv.org/abs/2201.11903, published in Jan, 2023 + +Chain of the thought(CoT) is to mimic a step-by-step thought process for arriving at the answer. You can achieve it in two ways: +1. 
Add instructions such as "Let's think step-by-step to answer this question". +2. Add few-shot examples such as +' +Q: Roger has 5 tennis balls. He buys 2 more cans of tennis balls. Each can has 3 tennis balls. How many tennis balls does he have now? +A: Roger started with 5 balls. 2 cansof 3 tennis balls each is 6 tennis balls. 5 + 6 = 11. The answer is 11. +' + +NOTE: CoT can be helpful for more complicated task, it also varies from task to task and model to model. +For instance, CoT might already be supported in gpt3.5+ api calls. + +Benchmark it with and without CoT to see if it helps. +""" + +# from core.component import Component +# from core.generator import Generator +# from core.string_parser import JsonParser +# from core.model_client import ModelClient +# from core.default_prompt_template import DEFAULT_LIGHTRAG_SYSTEM_PROMPT + + +COT_TASK_DESC_STR_BASIC = ( + "You are a helpful assistant. Let's think step-by-step to answer user's query." +) +# Using triple quotes to include JSON-like structure more cleanly +COT_TASK_DESC_STR_WITH_JSON_OUTPUT = f""" +{COT_TASK_DESC_STR_BASIC} Output JSON format: {{"thought": "", "answer": ""}} +""" + + +# ChainOfThought will just be a generator with preset_prompt_kwargs of the task_desc_str = COT_TASK_DESC_STR +# additional you can ask it to generate a json with "thought" and "anwer" keys and use jsonParser + + +# class CoTGenerator(Generator): +# r""" +# CoTGenerator is a subclass of Generator with default task_desc_str preset for Chain of Thought. +# Output will be string. +# It is exactly the same as using a Generator. +# Example: +# ``` +# cot = CoTGenerator(model_client=model_client, model_kwargs={"model": model}) +# ``` +# """ + +# def __init__( +# self, +# *, +# model_client: ModelClient, +# model_kwargs: Dict = {}, +# template: Optional[str] = None, +# preset_prompt_kwargs: Optional[Dict] = None, +# output_processors: Optional[Component] = None, +# ) -> None: + +# super().__init__( +# model_client=model_client, +# model_kwargs=model_kwargs, +# template=template or DEFAULT_LIGHTRAG_SYSTEM_PROMPT, +# preset_prompt_kwargs=preset_prompt_kwargs +# or {"task_desc_str": COT_TASK_DESC_STR_BASIC}, +# output_processors=output_processors, +# ) + + +# class CoTGeneratorWithJsonOutput(Generator): +# r""" +# CoTGeneratorWithJsonOutput is a subclass of Generator with default task_desc_str preset for Chain of Thought. +# Output will be parsed as JSON with "thought" and "answer" keys. 
+# Example: +# ``` +# cot = CoTGeneratorWithJsonOutput(model_client=model_client, model_kwargs={"model": model}) +# ``` +# """ + +# def __init__( +# self, +# *, +# model_client: ModelClient, +# model_kwargs: Dict = {}, +# template: Optional[str] = None, +# preset_prompt_kwargs: Optional[Dict] = None, +# output_processors: Optional[Component] = None, +# ) -> None: + +# super().__init__( +# model_client=model_client, +# model_kwargs=model_kwargs, +# template=template or DEFAULT_LIGHTRAG_SYSTEM_PROMPT, +# preset_prompt_kwargs=preset_prompt_kwargs +# or {"task_desc_str": COT_TASK_DESC_STR_WITH_JSON_OUTPUT}, +# output_processors=output_processors or JsonParser(), +# ) diff --git a/lightrag/components/retriever/__init__.py b/lightrag/lightrag/components/retriever/__init__.py similarity index 100% rename from lightrag/components/retriever/__init__.py rename to lightrag/lightrag/components/retriever/__init__.py diff --git a/lightrag/components/retriever/bm25_retriever.py b/lightrag/lightrag/components/retriever/bm25_retriever.py similarity index 100% rename from lightrag/components/retriever/bm25_retriever.py rename to lightrag/lightrag/components/retriever/bm25_retriever.py diff --git a/lightrag/components/retriever/faiss_retriever.py b/lightrag/lightrag/components/retriever/faiss_retriever.py similarity index 100% rename from lightrag/components/retriever/faiss_retriever.py rename to lightrag/lightrag/components/retriever/faiss_retriever.py diff --git a/lightrag/components/retriever/llm_retriever.py b/lightrag/lightrag/components/retriever/llm_retriever.py similarity index 100% rename from lightrag/components/retriever/llm_retriever.py rename to lightrag/lightrag/components/retriever/llm_retriever.py diff --git a/lightrag/components/retriever/postgres_retriever.py b/lightrag/lightrag/components/retriever/postgres_retriever.py similarity index 100% rename from lightrag/components/retriever/postgres_retriever.py rename to lightrag/lightrag/components/retriever/postgres_retriever.py diff --git a/lightrag/components/retriever/reranker_retriever.py b/lightrag/lightrag/components/retriever/reranker_retriever.py similarity index 100% rename from lightrag/components/retriever/reranker_retriever.py rename to lightrag/lightrag/components/retriever/reranker_retriever.py diff --git a/lightrag/core/README.md b/lightrag/lightrag/core/README.md similarity index 100% rename from lightrag/core/README.md rename to lightrag/lightrag/core/README.md diff --git a/lightrag/core/__init__.py b/lightrag/lightrag/core/__init__.py similarity index 100% rename from lightrag/core/__init__.py rename to lightrag/lightrag/core/__init__.py diff --git a/lightrag/core/base_data_class.py b/lightrag/lightrag/core/base_data_class.py similarity index 100% rename from lightrag/core/base_data_class.py rename to lightrag/lightrag/core/base_data_class.py diff --git a/lightrag/core/component.py b/lightrag/lightrag/core/component.py similarity index 100% rename from lightrag/core/component.py rename to lightrag/lightrag/core/component.py diff --git a/lightrag/core/db.py b/lightrag/lightrag/core/db.py similarity index 100% rename from lightrag/core/db.py rename to lightrag/lightrag/core/db.py diff --git a/lightrag/core/default_prompt_template.py b/lightrag/lightrag/core/default_prompt_template.py similarity index 100% rename from lightrag/core/default_prompt_template.py rename to lightrag/lightrag/core/default_prompt_template.py diff --git a/lightrag/core/embedder.py b/lightrag/lightrag/core/embedder.py similarity index 100% rename from 
lightrag/core/embedder.py rename to lightrag/lightrag/core/embedder.py diff --git a/lightrag/core/func_tool.py b/lightrag/lightrag/core/func_tool.py similarity index 100% rename from lightrag/core/func_tool.py rename to lightrag/lightrag/core/func_tool.py diff --git a/lightrag/core/functional.py b/lightrag/lightrag/core/functional.py similarity index 100% rename from lightrag/core/functional.py rename to lightrag/lightrag/core/functional.py diff --git a/lightrag/core/generator.py b/lightrag/lightrag/core/generator.py similarity index 96% rename from lightrag/core/generator.py rename to lightrag/lightrag/core/generator.py index 732ff8359..ced813800 100644 --- a/lightrag/core/generator.py +++ b/lightrag/lightrag/core/generator.py @@ -2,7 +2,11 @@ from copy import deepcopy import logging -from lightrag.core.types import ModelType, GeneratorOutput +from lightrag.core.types import ( + ModelType, + GeneratorOutput, + GeneratorOutputType, +) from lightrag.core.component import Component from lightrag.core.parameter import Parameter from lightrag.core.prompt_builder import Prompt @@ -11,9 +15,6 @@ from lightrag.core.default_prompt_template import DEFAULT_LIGHTRAG_SYSTEM_PROMPT -GeneratorInputType = str -GeneratorOutputType = GeneratorOutput - log = logging.getLogger(__name__) @@ -31,8 +32,8 @@ class Generator(Component): model_client (ModelClient): The model client to use for the generator. model_kwargs (Dict[str, Any], optional): The model kwargs to pass to the model client. Defaults to {}. Please refer to :ref:`ModelClient` for the details on how to set the model_kwargs for your specific model if it is from our library. template (Optional[str], optional): The template for the prompt. Defaults to :ref:`DEFAULT_LIGHTRAG_SYSTEM_PROMPT`. - preset_prompt_kwargs (Optional[Dict], optional): The preset prompt kwargs to fill in the variables in the prompt. Defaults to None. - output_processors (Optional[Component], optional): The output processors after model call. Defaults to None. + prompt_kwargs (Optional[Dict], optional): The preset prompt kwargs to fill in the variables in the prompt. Defaults to None. + output_processors (Optional[Component], optional): The output processors after model call. It can be a single component or a chained component via ``Sequential``. Defaults to None. trainable_params (Optional[List[str]], optional): The list of trainable parameters. Defaults to []. 
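For orientation, a minimal construction sketch matching these arguments (the model name and the ``input_str`` prompt variable are illustrative assumptions, not part of this change):

.. code-block:: python

    from lightrag.core import Generator
    from lightrag.components.model_client import OpenAIClient

    generator = Generator(
        model_client=OpenAIClient(),
        model_kwargs={"model": "gpt-3.5-turbo"},  # assumed model name
        prompt_kwargs={"task_desc_str": "You are a helpful assistant."},
    )
    # prompt_kwargs and model_kwargs can be overridden again at call time
    output = generator(prompt_kwargs={"input_str": "What is a Generator?"})
    print(output.data, output.error, output.raw_response)  # a GeneratorOutput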
Note: diff --git a/lightrag/core/model_client.py b/lightrag/lightrag/core/model_client.py similarity index 100% rename from lightrag/core/model_client.py rename to lightrag/lightrag/core/model_client.py diff --git a/lightrag/core/parameter.py b/lightrag/lightrag/core/parameter.py similarity index 100% rename from lightrag/core/parameter.py rename to lightrag/lightrag/core/parameter.py diff --git a/lightrag/core/prompt_builder.py b/lightrag/lightrag/core/prompt_builder.py similarity index 100% rename from lightrag/core/prompt_builder.py rename to lightrag/lightrag/core/prompt_builder.py diff --git a/lightrag/core/retriever.py b/lightrag/lightrag/core/retriever.py similarity index 100% rename from lightrag/core/retriever.py rename to lightrag/lightrag/core/retriever.py diff --git a/lightrag/core/string_parser.py b/lightrag/lightrag/core/string_parser.py similarity index 100% rename from lightrag/core/string_parser.py rename to lightrag/lightrag/core/string_parser.py diff --git a/lightrag/core/tokenizer.py b/lightrag/lightrag/core/tokenizer.py similarity index 100% rename from lightrag/core/tokenizer.py rename to lightrag/lightrag/core/tokenizer.py diff --git a/lightrag/core/tool_manager.py b/lightrag/lightrag/core/tool_manager.py similarity index 100% rename from lightrag/core/tool_manager.py rename to lightrag/lightrag/core/tool_manager.py diff --git a/lightrag/lightrag/core/types.py b/lightrag/lightrag/core/types.py new file mode 100644 index 000000000..8531b4126 --- /dev/null +++ b/lightrag/lightrag/core/types.py @@ -0,0 +1,738 @@ +"""Functional data classes to support functional components like Generator, Retriever, and Assistant.""" + +from enum import Enum, auto +from typing import ( + List, + Dict, + Any, + Optional, + Union, + Generic, + TypeVar, + Sequence, + Literal, + Callable, + Awaitable, +) +from collections import OrderedDict +from dataclasses import ( + dataclass, + field, + InitVar, +) +from uuid import UUID +from datetime import datetime +import uuid +import logging + +from lightrag.core.base_data_class import DataClass, required_field +from lightrag.core.tokenizer import Tokenizer +from lightrag.core.functional import ( + is_normalized, + generate_function_call_expression_from_callable, +) +from lightrag.components.model_client import ( + CohereAPIClient, + TransformersClient, + AnthropicAPIClient, + GroqAPIClient, + OpenAIClient, +) + + +logger = logging.getLogger(__name__) + +T_co = TypeVar("T_co", covariant=True) + + +####################################################################################### +# Data modeling for ModelClient +###################################################################################### +class ModelType(Enum): + EMBEDDER = auto() + LLM = auto() + RERANKER = auto() # ranking model + UNDEFINED = auto() + + +@dataclass +class ModelClientType: + COHERE = CohereAPIClient + TRANSFORMERS = TransformersClient + ANTHROPIC = AnthropicAPIClient + GROQ = GroqAPIClient + OPENAI = OpenAIClient + + +# TODO: define standard required outputs +def get_model_args(model_type: ModelType) -> List[str]: + r"""Get the required keys in model_kwargs for a specific model type. + + note: + If your model inference SDK uses different keys, you need to convert them to the standard keys in its specific ModelClient. + + Args: + model_type (ModelType): The model type + + Returns: + List[str]: The required keys in model_kwargs + """ + if model_type == ModelType.EMBEDDER: + return ["model"] + elif model_type == ModelType.LLM: + return ["model"] + elif model_type == ModelType.RERANKER: + return ["model", "top_k", "documents", "query"] + else: + return [] + + +####################################################################################### +# Data modeling for Embedder component +###################################################################################### +@dataclass +class Embedding: + """ + Container for a single embedding. + + In sync with the API spec, same as openai/types/embedding.py + """ + + embedding: List[float] + index: Optional[int] # match with the index of the input, in case some are missing + + +@dataclass +class Usage: + """ + In sync with the API spec, same as openai/types/create_embedding_response.py + """ + + prompt_tokens: int + total_tokens: int + + +@dataclass +class EmbedderOutput(DataClass): + __doc__ = r"""Container to hold the response from an Embedder model. Per-batch only. + + Data standard for Embedder model output to interact with other components. + Batch processing is often available, thus we need a list of Embedding objects. + """ + + data: List[Embedding] = field( + default_factory=list, metadata={"desc": "List of embeddings"} + ) + model: Optional[str] = field(default=None, metadata={"desc": "Model name"}) + usage: Optional[Usage] = field(default=None, metadata={"desc": "Usage tracking"}) + error: Optional[str] = field(default=None, metadata={"desc": "Error message"}) + raw_response: Optional[Any] = field( + default=None, metadata={"desc": "Raw response"} + ) # only used if error + input: Optional[List[str]] = field(default=None, metadata={"desc": "Input text"}) + + @property + def length(self) -> int: + return len(self.data) if self.data and isinstance(self.data, Sequence) else 0 + + @property + def embedding_dim(self) -> int: + r"""The dimension of the embedding, assuming all embeddings have the same dimension. + + Returns: + int: The dimension of the embedding, -1 if no embedding is available + """ + return ( + len(self.data[0].embedding) if self.data and self.data[0].embedding else -1 + ) + + @property + def is_normalized(self) -> bool: + r"""Check if the embeddings are normalized to unit vectors. + + Returns: + bool: True if the embeddings are normalized, False otherwise + """ + return ( + is_normalized(self.data[0].embedding) + if self.data and self.data[0].embedding + else False + ) + + +EmbedderInputType = Union[str, Sequence[str]] +EmbedderOutputType = EmbedderOutput + +BatchEmbedderInputType = EmbedderInputType +BatchEmbedderOutputType = List[EmbedderOutputType] + + +####################################################################################### +# Data modeling for Generator component +###################################################################################### +@dataclass +class TokenLogProb: + r"""similar to openai.ChatCompletionTokenLogprob""" + + token: str + logprob: float + + +@dataclass +class GeneratorOutput(DataClass, Generic[T_co]): + __doc__ = r""" + The output data class for the Generator component. + We cannot control its output 100%, so we use this to track the error message and + allow the raw string output to be passed through. + + (1) When the model prediction and the output processors both succeed, + `data` holds the final output and `error` is None. + (2) When either the model prediction or the output processors fail, + `data` is None and `error` holds the error message. + + What `raw_response` contains depends on the model prediction. + """ + + data: T_co = field( + default=None, + metadata={"desc": "The final output data potentially after output parsers"}, + ) + error: Optional[str] = field( + default=None, + metadata={"desc": "Error message if any"}, + ) + usage: Optional[Usage] = field(default=None, metadata={"desc": "Usage tracking"}) + raw_response: Optional[str] = field( + default=None, metadata={"desc": "Raw string response from the model"} + ) # parsed from model client response + + +GeneratorInputType = str
GeneratorOutputType = GeneratorOutput[object] + +####################################################################################### +# Data modeling for Retriever component +###################################################################################### + +RetrieverQueryType = TypeVar("RetrieverQueryType", contravariant=True) +RetrieverStrQueryType = str +RetrieverQueriesType = Union[RetrieverQueryType, Sequence[RetrieverQueryType]] +RetrieverStrQueriesType = Union[str, Sequence[RetrieverStrQueryType]] + +RetrieverDocumentType = TypeVar("RetrieverDocumentType", contravariant=True) +RetrieverStrDocumentType = str # for text retrieval +RetrieverDocumentsType = Sequence[RetrieverDocumentType] + + +@dataclass +class RetrieverOutput(DataClass): + __doc__ = r"""Save the output of a single query in retrievers. + + It is up to the subclass of Retriever to specify the type of query and document. + """ + + doc_indices: List[int] = field(metadata={"desc": "List of document indices"}) + doc_scores: Optional[List[float]] = field( + default=None, metadata={"desc": "List of document scores"} + ) + query: Optional[RetrieverQueryType] = field( + default=None, metadata={"desc": "The query used to retrieve the documents"} + ) + documents: Optional[List[RetrieverDocumentType]] = field( + default=None, metadata={"desc": "List of retrieved documents"} + ) + + +RetrieverOutputType = List[RetrieverOutput] # to support multiple queries at once + + +####################################################################################### +# Data modeling for function calls +###################################################################################### +AsyncCallable = Callable[..., Awaitable[Any]] + + +@dataclass +class FunctionDefinition(DataClass): + __doc__ = r"""The data modeling of a function definition, including the name, description, and parameters.""" + + func_name: str = field(metadata={"desc": "The name of the tool"}) + func_desc: Optional[str] = field( + default=None, metadata={"desc": "The description of the tool"} + ) + func_parameters: Dict[str, object] = field( + default_factory=dict, metadata={"desc": "The schema of the parameters"} + ) + + def fn_schema_str(self, type: Literal["json", "yaml"] = "json") -> str: + r"""Get the function definition str to be used in the prompt. + + You should also directly use :meth:`to_json` and :meth:`to_yaml` to get the schema in JSON or YAML format. + """ + if type == "json": + return self.to_json() + elif type == "yaml": + return self.to_yaml() + else: + raise ValueError(f"Unsupported type: {type}") + + +@dataclass +class Function(DataClass): + __doc__ = r"""The data modeling of a function call, including the name and keyword arguments. + + You can use the ``exclude`` argument in :meth:`to_json` and :meth:`to_yaml` to exclude the `thought` field if you do not want to use the chain-of-thought pattern.
+ + Example: + + .. code-block:: python + + # assume the function is added in a context_map + # context_map = {"add": add} + + def add(a, b): + return a + b + + # call function add with arguments 1 and 2 + fun = Function(name="add", kwargs={"a": 1, "b": 2}) + # evaluate the function + result = context_map[fun.name](**fun.kwargs) + + # or call with positional arguments + fun = Function(name="add", args=[1, 2]) + result = context_map[fun.name](*fun.args) + """ + thought: Optional[str] = field( + default=None, metadata={"desc": "Why the function is called"} + ) + name: str = field(default="", metadata={"desc": "The name of the function"}) + args: Optional[List[object]] = field( + default_factory=list, + metadata={"desc": "The positional arguments of the function"}, + ) + kwargs: Optional[Dict[str, object]] = field( + default_factory=dict, + metadata={"desc": "The keyword arguments of the function"}, + ) + + +@dataclass +class FunctionExpression(DataClass): + __doc__ = r"""The data modeling of a function expression for a call, including the name and arguments. + + Example: + + .. code-block:: python + + def add(a, b): + return a + b + + # call function add with positional arguments 1 and 2 + fun_expr = FunctionExpression(action="add(1, 2)") + # evaluate the expression + result = eval(fun_expr.action) + print(result) + # Output: 3 + + # call function add with keyword arguments + fun_expr = FunctionExpression(action="add(a=1, b=2)") + result = eval(fun_expr.action) + print(result) + # Output: 3 + + Why ask the LLM to generate a function expression (code snippet) for a function call? + - It is a more efficient/compact way to call a function. + - It is more flexible. + (1) It covers the full range of Python expressions, including arithmetic operations, nested function calls, and more. + (2) It allows passing variables as arguments. + - It is easy to parse using the ``ast`` module. + + The benefit is fewer failed function calls. + """ + thought: Optional[str] = field( + default=None, metadata={"desc": "Why the function is called"} + ) + action: str = field( + default_factory=required_field, + # metadata={"desc": "FuncName(, )"}, + metadata={ + "desc": """FuncName() \ + Valid function call expression. \ + Example: "FuncName(a=1, b=2)" \ + Follow the data type specified in the function parameters.\ + e.g. for Type object with x,y properties, use "ObjectType(x=1, y=2)""" + }, + ) + + @classmethod + def from_function( + cls, + func: Union[Callable[..., Any], AsyncCallable], + thought: Optional[str] = None, + *args, + **kwargs, + ) -> "FunctionExpression": + r"""Create a FunctionExpression object from a function. + + Args: + func (Union[Callable[..., Any], AsyncCallable]): The function to be converted + + Returns: + FunctionExpression: The FunctionExpression object + + Usage: + 1. Create a FunctionExpression object from a function call. + 2. Use :meth:`to_json` and :meth:`to_yaml` to get the schema in JSON or YAML format. + 3. This will be used as an example in the prompt, showing the LLM how to call the function. + """ + try: + action = generate_function_call_expression_from_callable( + func, *args, **kwargs + ) + except Exception as e: + logger.error(f"Error generating function expression: {e}") + raise ValueError(f"Error generating function expression: {e}") + return cls(action=action, thought=thought) + + +# saves the output of a function tool.
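Complementing the docstring examples above, a small sketch of how ``from_function`` might be used (illustrative; the exact rendered expression comes from ``generate_function_call_expression_from_callable``):

.. code-block:: python

    def add(a: int, b: int) -> int:
        return a + b

    # build the call expression directly from the callable and its arguments
    expr = FunctionExpression.from_function(add, thought="Sum two numbers", a=1, b=2)
    print(expr.action)  # e.g. "add(a=1, b=2)"
    print(eval(expr.action, {"add": add}))
    # Output: 3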
+
+
+# saves the output of a function tool.
+@dataclass
+class FunctionOutput(DataClass):
+    __doc__ = (
+        r"""The output of a tool, which could be a function, a class, or a module."""
+    )
+    name: Optional[str] = field(
+        default=None, metadata={"desc": "The name of the function"}
+    )
+    input: Optional[Union[Function, FunctionExpression]] = field(
+        default=None, metadata={"desc": "The Function or FunctionExpression object"}
+    )
+    parsed_input: Optional[Function] = field(
+        default=None,
+        metadata={
+            "desc": "The parsed Function object if the input is FunctionExpression"
+        },
+    )
+    output: Optional[object] = field(
+        default=None, metadata={"desc": "The output of the function execution"}
+    )
+    error: Optional[str] = field(
+        default=None, metadata={"desc": "The error message if any"}
+    )
+
+
+#######################################################################################
+# Data modeling for agent component
+######################################################################################
+@dataclass
+class StepOutput(DataClass):
+    __doc__ = r"""The output of a single step in the agent."""
+    step: int = field(
+        default=0, metadata={"desc": "The order of the step in the agent"}
+    )
+    thought: Optional[str] = field(
+        default="", metadata={"desc": "The thought of the agent in the step"}
+    )
+    action: str = field(
+        default="", metadata={"desc": "The action of the agent in the step"}
+    )
+    fun_name: Optional[str] = field(
+        default=None, metadata={"desc": "The function name parsed from action"}
+    )
+    fun_args: Optional[List[Any]] = field(
+        default=None,
+        metadata={"desc": "The function positional arguments parsed from action"},
+    )
+    fun_kwargs: Optional[Dict[str, Any]] = field(
+        default=None,
+        metadata={"desc": "The function keyword arguments parsed from action"},
+    )
+    observation: Optional[str] = field(
+        default=None, metadata={"desc": "The result of the action"}
+    )
+
+    def __str__(self):
+        return f"Thought {self.step}: {self.thought}\nAction {self.step}: {self.action}\nObservation {self.step}: {self.observation}"
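A small sketch of recording one ReAct-style agent step with the StepOutput defined above; the field values are illustrative, and the import assumes the `lightrag.core.types` module this diff introduces:

.. code-block:: python

    from lightrag.core.types import StepOutput

    step = StepOutput(
        step=0,
        thought="I need to add the two numbers.",
        action="add(a=1, b=2)",
        fun_name="add",
        fun_kwargs={"a": 1, "b": 2},
        observation="3",
    )
    # __str__ renders the Thought/Action/Observation trace for the prompt.
    print(step)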
+
+
+#######################################################################################
+# Data modeling for data processing pipeline such as Text splitting and Embedding
+######################################################################################
+@dataclass
+class Document(DataClass):
+    __doc__ = r"""A text container with optional metadata and vector representation.
+
+    It is the data structure that supports components like Retriever and DocumentSplitter, and it is used with LocalDB.
+    """
+
+    text: str = field(metadata={"desc": "The main text"})
+
+    meta_data: Optional[Dict[str, Any]] = field(
+        default=None, metadata={"desc": "Metadata for the document"}
+    )
+    # can save data for filtering at retrieval time too
+    vector: List[float] = field(
+        default_factory=list,
+        metadata={"desc": "The vector representation of the document"},
+    )
+
+    id: Optional[str] = field(
+        default_factory=lambda: str(uuid.uuid4()), metadata={"desc": "Unique id"}
+    )  # unique id of the document
+    order: Optional[int] = field(
+        default=None,
+        metadata={"desc": "Order of the chunked document in the original document"},
+    )
+
+    score: Optional[float] = field(
+        default=None,
+        metadata={"desc": "Score of the document, likely used in retrieval output"},
+    )
+    parent_doc_id: Optional[Union[str, UUID]] = field(
+        default=None, metadata={"desc": "id of the Document where the chunk is from"}
+    )
+
+    estimated_num_tokens: Optional[int] = field(
+        default=None,
+        metadata={
+            "desc": "Estimated number of tokens in the text, useful for cost estimation"
+        },
+    )
+
+    def __post_init__(self):
+        if self.estimated_num_tokens is None and self.text:
+            tokenizer = Tokenizer()
+            self.estimated_num_tokens = tokenizer.count_tokens(self.text)
+
+    @classmethod
+    def from_dict(cls, doc: Dict):
+        """Create a Document object from a dictionary.
+
+        Example:
+
+        .. code-block:: python
+
+            doc = Document.from_dict({
+                "id": "123",
+                "text": "Hello world",
+                "meta_data": {"title": "Greeting"}
+            })
+        """
+
+        doc = doc.copy()
+        assert "meta_data" in doc, "meta_data is required"
+        assert "text" in doc, "text is required"
+        if "estimated_num_tokens" not in doc:
+            tokenizer = Tokenizer()
+            doc["estimated_num_tokens"] = tokenizer.count_tokens(doc["text"])
+        if "id" not in doc or not doc["id"]:
+            doc["id"] = str(uuid.uuid4())
+
+        return super().from_dict(doc)
+
+    def __repr__(self):
+        """Custom repr method that truncates the text to 100 characters and summarizes the vector by its length."""
+        max_chars_to_show = 100
+        truncated_text = (
+            self.text[:max_chars_to_show] + "..."
+            if len(self.text) > max_chars_to_show
+            else self.text
+        )
+        truncated_vector = (
+            f"len: {len(self.vector)}" if len(self.vector) else self.vector
+        )
+        return (
+            f"Document(id={self.id}, text={truncated_text!r}, meta_data={self.meta_data}, "
+            f"vector={truncated_vector!r}, parent_doc_id={self.parent_doc_id}, order={self.order}, "
+            f"score={self.score})"
+        )
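A short sketch of constructing Documents, assuming the `lightrag.core.types` module (and the Tokenizer dependency it uses) is importable; note how `__post_init__` fills `estimated_num_tokens` automatically:

.. code-block:: python

    from lightrag.core.types import Document

    # Direct construction; id and estimated_num_tokens are filled in.
    doc = Document(text="Hello world", meta_data={"title": "Greeting"})
    print(doc.id, doc.estimated_num_tokens)

    # from_dict requires both "text" and "meta_data" keys, per the asserts above.
    doc2 = Document.from_dict(
        {"id": "123", "text": "Hello world", "meta_data": {"title": "Greeting"}}
    )
    print(doc2)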
+
+
+#######################################################################################
+# Data modeling for dialog system
+######################################################################################
+@dataclass
+class UserQuery:
+    query_str: str
+    metadata: Optional[Dict[str, Any]] = None
+
+
+@dataclass
+class AssistantResponse:
+    response_str: str
+    metadata: Optional[Dict[str, Any]] = None
+
+
+# There could be other roles in a multi-party conversation; we might consider them in the future.
+@dataclass
+class DialogTurn(DataClass):
+    __doc__ = r"""A turn consists of a user query and the assistant response.
+
+    The data format is designed to fit into a relational database, where each turn is a row.
+    Use `conversation_id` to group the turns into a conversation, the `order` field to sequence them within it,
+    and `user_query_timestamp` and `assistant_response_timestamp` to order them in time.
+
+    Args:
+
+        id (str): The unique id of the turn.
+        user_id (str, optional): The unique id of the user.
+        conversation_id (str, optional): The unique id of the conversation it belongs to.
+        order (int, optional): The order of the turn in the conversation, starts from 0.
+        user_query (UserQuery, optional): The user query in the turn.
+        assistant_response (AssistantResponse, optional): The assistant response in the turn.
+        user_query_timestamp (datetime, optional): The timestamp of the user query.
+        assistant_response_timestamp (datetime, optional): The timestamp of the assistant response.
+        metadata (Dict[str, Any], optional): Additional metadata.
+
+    Examples:
+
+        - User: Hi, how are you?
+        - Assistant: I'm fine, thank you!
+
+        DialogTurn(id=str(uuid4()), user_query=UserQuery("Hi, how are you?"), assistant_response=AssistantResponse("I'm fine, thank you!"))
+    """
+
+    id: str = field(
+        default_factory=lambda: str(uuid.uuid4()),
+        metadata={"desc": "The unique id of the turn"},
+    )
+    user_id: Optional[str] = field(
+        default=None, metadata={"desc": "The unique id of the user"}
+    )
+    conversation_id: Optional[str] = field(
+        default=None,
+        metadata={"desc": "The unique id of the conversation it belongs to"},
+    )
+    order: Optional[int] = field(
+        default=None,
+        metadata={"desc": "The order of the turn in the conversation, starts from 0"},
+    )
+
+    user_query: Optional[UserQuery] = field(
+        default=None, metadata={"desc": "The user query in the turn"}
+    )
+    assistant_response: Optional[AssistantResponse] = field(
+        default=None, metadata={"desc": "The assistant response in the turn"}
+    )
+    user_query_timestamp: Optional[datetime] = field(
+        default_factory=datetime.now,
+        metadata={"desc": "The timestamp of the user query"},
+    )
+    assistant_response_timestamp: Optional[datetime] = field(
+        default_factory=datetime.now,
+        metadata={"desc": "The timestamp of the assistant response"},
+    )
+    metadata: Optional[Dict[str, Any]] = field(
+        default=None, metadata={"desc": "Additional metadata"}
+    )
+    vector: Optional[List[float]] = field(
+        default=None,
+        metadata={"desc": "The vector representation of the dialog turn"},
+    )
+
+    def set_user_query(
+        self, user_query: UserQuery, user_query_timestamp: Optional[datetime] = None
+    ):
+        self.user_query = user_query
+        if not user_query_timestamp:
+            user_query_timestamp = datetime.now()
+        self.user_query_timestamp = user_query_timestamp
+
+    def set_assistant_response(
+        self,
+        assistant_response: AssistantResponse,
+        assistant_response_timestamp: Optional[datetime] = None,
+    ):
+        self.assistant_response = assistant_response
+        if not assistant_response_timestamp:
+            assistant_response_timestamp = datetime.now()
+        self.assistant_response_timestamp = assistant_response_timestamp
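A brief sketch of filling a DialogTurn with the setters above; the import assumes the `lightrag.core.types` module this diff introduces:

.. code-block:: python

    from lightrag.core.types import AssistantResponse, DialogTurn, UserQuery

    turn = DialogTurn()
    turn.set_user_query(UserQuery(query_str="Hi, how are you?"))
    turn.set_assistant_response(
        AssistantResponse(response_str="I'm fine, thank you!")
    )
    # Timestamps default to datetime.now() when not supplied.
    print(turn.user_query_timestamp <= turn.assistant_response_timestamp)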
+
+
+# TODO: This part and the Memory class are still WIP and will need more work in the future.
+@dataclass
+class Conversation:
+    __doc__ = r"""A conversation manages the dialog turns in a whole conversation as a session.
+
+    This class is mainly used in-memory for the dialog system/app to manage active conversations.
+    You won't need this class for past conversations, which have already been persisted in a database
+    as a form of record or history.
+    """
+
+    id: str = field(
+        default_factory=lambda: str(uuid.uuid4()),
+        metadata={"desc": "The id of the conversation"},
+    )  # the id of the conversation
+    name: Optional[str] = field(
+        default=None, metadata={"desc": "The name of the conversation"}
+    )
+    user_id: Optional[str] = field(
+        default=None, metadata={"desc": "The id of the user"}
+    )
+    dialog_turns: OrderedDict[int, DialogTurn] = field(
+        default_factory=OrderedDict, metadata={"desc": "The dialog turns"}
+    )
+    # int is the order of the turn, starts from 0
+    metadata: Optional[Dict[str, Any]] = field(
+        default=None, metadata={"desc": "Additional metadata"}
+    )
+
+    created_at: Optional[datetime] = field(
+        default_factory=datetime.now,
+        metadata={"desc": "The timestamp of the conversation creation"},
+    )
+
+    # InitVar type annotation is used for parameters that are used in __post_init__
+    # but are not meant to be fields in the dataclass.
+    dialog_turns_input: InitVar[
+        Optional[Union[OrderedDict[int, DialogTurn], List[DialogTurn]]]
+    ] = None
+
+    def __post_init__(
+        self,
+        dialog_turns_input: Optional[
+            Union[OrderedDict[int, DialogTurn], List[DialogTurn]]
+        ] = None,
+    ):
+        if dialog_turns_input:
+            if isinstance(dialog_turns_input, list):
+                # Assume the list contains DialogTurn objects that need to be added to an OrderedDict.
+                for dialog_turn in dialog_turns_input:
+                    self.append_dialog_turn(dialog_turn)
+            elif isinstance(dialog_turns_input, OrderedDict):
+                self.dialog_turns = dialog_turns_input
+            else:
+                raise ValueError(
+                    "dialog_turns should be a list of DialogTurn or an OrderedDict"
+                )
+
+    def get_next_order(self):
+        return len(self.dialog_turns)
+
+    def append_dialog_turn(self, dialog_turn: DialogTurn):
+        next_order = self.get_next_order()
+        if dialog_turn.order is None:
+            dialog_turn.order = next_order
+        else:
+            assert dialog_turn.order == next_order, f"order should be {next_order}"
+        self.dialog_turns[next_order] = dialog_turn
+
+    def get_dialog_turns(self) -> OrderedDict[int, DialogTurn]:
+        return self.dialog_turns
+
+    def get_chat_history_str(self) -> str:
+        chat_history_str = ""
+        for dialog_turn in self.dialog_turns.values():
+            chat_history_str += f"User: {dialog_turn.user_query.query_str}\n"
+            chat_history_str += (
+                f"Assistant: {dialog_turn.assistant_response.response_str}\n"
+            )
+        return chat_history_str
+
+    def delete_dialog_turn(self, order: int):
+        self.dialog_turns.pop(order)
+
+    def update_dialog_turn(self, order: int, dialog_turn: DialogTurn):
+        self.dialog_turns[order] = dialog_turn
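A quick sketch of managing an active session with the Conversation class above; the import assumes the `lightrag.core.types` module this diff introduces:

.. code-block:: python

    from lightrag.core.types import (
        AssistantResponse,
        Conversation,
        DialogTurn,
        UserQuery,
    )

    conv = Conversation(name="demo")
    turn = DialogTurn(
        user_query=UserQuery(query_str="Hi, how are you?"),
        assistant_response=AssistantResponse(response_str="I'm fine, thank you!"),
    )
    # append_dialog_turn assigns order 0, 1, ... automatically when order is None.
    conv.append_dialog_turn(turn)
    print(conv.get_chat_history_str())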
diff --git a/lightrag/database/README.md b/lightrag/lightrag/database/README.md
similarity index 100%
rename from lightrag/database/README.md
rename to lightrag/lightrag/database/README.md
diff --git a/lightrag/database/__init__.py b/lightrag/lightrag/database/__init__.py
similarity index 100%
rename from lightrag/database/__init__.py
rename to lightrag/lightrag/database/__init__.py
diff --git a/lightrag/database/sqlalchemy/__init__.py b/lightrag/lightrag/database/sqlalchemy/__init__.py
similarity index 100%
rename from lightrag/database/sqlalchemy/__init__.py
rename to lightrag/lightrag/database/sqlalchemy/__init__.py
diff --git a/lightrag/database/sqlalchemy/base.py b/lightrag/lightrag/database/sqlalchemy/base.py
similarity index 100%
rename from lightrag/database/sqlalchemy/base.py
rename to lightrag/lightrag/database/sqlalchemy/base.py
diff --git a/lightrag/database/sqlalchemy/model.py b/lightrag/lightrag/database/sqlalchemy/model.py
similarity index 100%
rename from lightrag/database/sqlalchemy/model.py
rename to lightrag/lightrag/database/sqlalchemy/model.py
diff --git a/lightrag/database/sqlalchemy/pipeline/__init__.py b/lightrag/lightrag/database/sqlalchemy/pipeline/__init__.py
similarity index 100%
rename from lightrag/database/sqlalchemy/pipeline/__init__.py
rename to lightrag/lightrag/database/sqlalchemy/pipeline/__init__.py
diff --git a/lightrag/database/sqlalchemy/pipeline/create_tables.py b/lightrag/lightrag/database/sqlalchemy/pipeline/create_tables.py
similarity index 100%
rename from lightrag/database/sqlalchemy/pipeline/create_tables.py
rename to lightrag/lightrag/database/sqlalchemy/pipeline/create_tables.py
diff --git a/lightrag/database/sqlalchemy/pipeline/default_config.py b/lightrag/lightrag/database/sqlalchemy/pipeline/default_config.py
similarity index 100%
rename from lightrag/database/sqlalchemy/pipeline/default_config.py
rename to lightrag/lightrag/database/sqlalchemy/pipeline/default_config.py
diff --git a/lightrag/database/sqlalchemy/pipeline/inject_data.py b/lightrag/lightrag/database/sqlalchemy/pipeline/inject_data.py
similarity index 100%
rename from lightrag/database/sqlalchemy/pipeline/inject_data.py
rename to lightrag/lightrag/database/sqlalchemy/pipeline/inject_data.py
diff --git a/lightrag/database/sqlalchemy/sqlachemy_manager.py b/lightrag/lightrag/database/sqlalchemy/sqlachemy_manager.py
similarity index 100%
rename from lightrag/database/sqlalchemy/sqlachemy_manager.py
rename to lightrag/lightrag/database/sqlalchemy/sqlachemy_manager.py
diff --git a/lightrag/eval/__init__.py b/lightrag/lightrag/eval/__init__.py
similarity index 100%
rename from lightrag/eval/__init__.py
rename to lightrag/lightrag/eval/__init__.py
diff --git a/lightrag/eval/answer_match_acc.py b/lightrag/lightrag/eval/answer_match_acc.py
similarity index 100%
rename from lightrag/eval/answer_match_acc.py
rename to lightrag/lightrag/eval/answer_match_acc.py
diff --git a/lightrag/eval/llm_as_judge.py b/lightrag/lightrag/eval/llm_as_judge.py
similarity index 97%
rename from lightrag/eval/llm_as_judge.py
rename to lightrag/lightrag/eval/llm_as_judge.py
index 55646f666..19cd6c2b4 100644
--- a/lightrag/eval/llm_as_judge.py
+++ b/lightrag/lightrag/eval/llm_as_judge.py
@@ -43,7 +43,7 @@ class DefaultLLMJudge(Component):
     __doc__ = r"""Demonstrate how to use an LLM/Generator to output True or False for a judgement query.
 
     You can use any of your templates to adapt to more tasks, and sometimes you can directly ask the LLM to output a score in the range [0, 1] instead of only True or False.
-
+
     A call on the LLM judge is equivalent to the _compute_single_item method.
     Args:
@@ -59,7 +59,7 @@ def __init__(
         super().__init__()
         self.model_client = model_client
         if model_client is None:
-            log.info(f"model_client is None, default to OpenAIClient.")
+            log.info("model_client is None, default to OpenAIClient.")
             try:
                 from lightrag.components.model_client import OpenAIClient
             except ImportError:
@@ -172,8 +172,6 @@ def compute(
 
 if __name__ == "__main__":
-    from lightrag.utils import setup_env
-
     from lightrag.components.model_client import OpenAIClient
 
     questions = [
         "Is Beijing in China?",
diff --git a/lightrag/eval/retriever_recall.py b/lightrag/lightrag/eval/retriever_recall.py
similarity index 100%
rename from lightrag/eval/retriever_recall.py
rename to lightrag/lightrag/eval/retriever_recall.py
diff --git a/lightrag/eval/retriever_relevance.py b/lightrag/lightrag/eval/retriever_relevance.py
similarity index 100%
rename from lightrag/eval/retriever_relevance.py
rename to lightrag/lightrag/eval/retriever_relevance.py
diff --git a/lightrag/icl/README.md b/lightrag/lightrag/icl/README.md
similarity index 68%
rename from lightrag/icl/README.md
rename to lightrag/lightrag/icl/README.md
index 9e8133fb2..ed28807c2 100644
--- a/lightrag/icl/README.md
+++ b/lightrag/lightrag/icl/README.md
@@ -1,3 +1,3 @@
-ICL with few-shots or many-shots if you have a large-context LLM is a must when we bootstrap any ML tasks or to compare with model finetune performances.
+ICL with few shots, or many shots if you have a large-context LLM, is a must when we bootstrap any ML task or compare against model finetuning performance.

-When ICL is used for classical ML like classification, if we have the logits of tokens, we can use `constrainded decoding` to
\ No newline at end of file
+When ICL is used for classical ML like classification, if we have the token logits, we can use `constrained decoding` to
diff --git a/lightrag/icl/__init__.py b/lightrag/lightrag/icl/__init__.py
similarity index 100%
rename from lightrag/icl/__init__.py
rename to lightrag/lightrag/icl/__init__.py
diff --git a/lightrag/icl/retrieval_icl.py b/lightrag/lightrag/icl/retrieval_icl.py
similarity index 100%
rename from lightrag/icl/retrieval_icl.py
rename to lightrag/lightrag/icl/retrieval_icl.py
diff --git a/lightrag/optim/__init__.py b/lightrag/lightrag/optim/__init__.py
similarity index 60%
rename from lightrag/optim/__init__.py
rename to lightrag/lightrag/optim/__init__.py
index 6c97a705f..76a50dafb 100644
--- a/lightrag/optim/__init__.py
+++ b/lightrag/lightrag/optim/__init__.py
@@ -1,6 +1,6 @@
-from .few_shot_optimizer import *
-from .llm_optimizer import *
-from .optimizer import *
+from .few_shot_optimizer import BootstrapFewShot
+from .llm_optimizer import LLMOptimizer
+from .optimizer import Optimizer
 from .sampler import RandomSampler, ClassSampler, Sampler
 
 __all__ = [
diff --git a/lightrag/optim/few_shot_optimizer.py b/lightrag/lightrag/optim/few_shot_optimizer.py
similarity index 97%
rename from lightrag/optim/few_shot_optimizer.py
rename to lightrag/lightrag/optim/few_shot_optimizer.py
index 27737ea58..101b77204 100644
--- a/lightrag/optim/few_shot_optimizer.py
+++ b/lightrag/lightrag/optim/few_shot_optimizer.py
@@ -84,7 +84,7 @@ def random_replace(
     ):
         assert (
             len(self.current) == self.num_shots
-        ), f"Ensure you have called init() first to setup the current examples before replacing a subset of them."
+        ), "Ensure you have called init() first to setup the current examples before replacing a subset of them."
         self.proposed = self.sampler.random_replace(
             shots, deepcopy(self.current), weights_per_class=weights_per_class
         )
diff --git a/lightrag/optim/llm_augment.py b/lightrag/lightrag/optim/llm_augment.py
similarity index 100%
rename from lightrag/optim/llm_augment.py
rename to lightrag/lightrag/optim/llm_augment.py
diff --git a/lightrag/optim/llm_optimizer.py b/lightrag/lightrag/optim/llm_optimizer.py
similarity index 98%
rename from lightrag/optim/llm_optimizer.py
rename to lightrag/lightrag/optim/llm_optimizer.py
index 857527e31..5a6150437 100644
--- a/lightrag/optim/llm_optimizer.py
+++ b/lightrag/lightrag/optim/llm_optimizer.py
@@ -4,7 +4,7 @@
 """
 
 from typing import Dict, Any, List, Optional
-from dataclasses import dataclass, field
+from dataclasses import field
 from copy import deepcopy
 
 from lightrag.core.base_data_class import DataClass
@@ -30,8 +30,8 @@
 Below are some of your previous instructions and their scores, the higher the score the better the instruction:
 {% for instruction in instructions %}
-- {{loop.index}}. 
-- text: {{instruction.text}} 
+- {{loop.index}}.
+- text: {{instruction.text}}
 - score: {{instruction.score}})
 {% if instruction.responses is defined %}
 - responses: {{instruction.responses}}
diff --git a/lightrag/optim/optimizer.py b/lightrag/lightrag/optim/optimizer.py
similarity index 100%
rename from lightrag/optim/optimizer.py
rename to lightrag/lightrag/optim/optimizer.py
diff --git a/lightrag/optim/sampler.py b/lightrag/lightrag/optim/sampler.py
similarity index 100%
rename from lightrag/optim/sampler.py
rename to lightrag/lightrag/optim/sampler.py
diff --git a/lightrag/tracing/__init__.py b/lightrag/lightrag/tracing/__init__.py
similarity index 100%
rename from lightrag/tracing/__init__.py
rename to lightrag/lightrag/tracing/__init__.py
diff --git a/lightrag/tracing/decorators.py b/lightrag/lightrag/tracing/decorators.py
similarity index 97%
rename from lightrag/tracing/decorators.py
rename to lightrag/lightrag/tracing/decorators.py
index 0311ef7bf..c360b3d7f 100644
--- a/lightrag/tracing/decorators.py
+++ b/lightrag/lightrag/tracing/decorators.py
@@ -15,13 +15,13 @@ def trace_generator_states(
     project_name: Optional[str] = None,
     filename: Optional[str] = None,
 ):
-    __doc__ = r"""Decorator to trace generators in a task component.
+    r"""Decorator to trace generators in a task component.
     It dynamically attaches a GeneratorLogger to the target generator attribute and logs the prompt states of the generator.
     You can use it on any component that has attributes pointing to a generator object.
 
     Args:
-        attributes (List[str], Optional): The list of attributes that point to the generator objects. 
+        attributes (List[str], Optional): The list of attributes that point to the generator objects.
         If not provided, it will automatically detect the attributes that are instances of Generator.
         filepath (str, Optional): The path to the directory where the trace file will be saved. Default is "./traces/".
         filename (str, Optional): The name of the trace file. If not provided, it will be "{class_name}_generator_trace.json".
@@ -101,7 +101,7 @@ def trace_generator_call(
     save_dir: Optional[str] = "./traces/",
     error_only: bool = True,
 ):
-    __doc__ = r"""Decorator to trace generator predictions in a task component, especially failed ones.
+    r"""Decorator to trace generator predictions in a task component, especially failed ones.
 
     This decorator is a wrapper around the generator call method. It logs the generator call by reading its GeneratorOutput and logs the call if the output is an error.
@@ -123,7 +123,7 @@ def trace_generator_call(
     >>> )
     >>> # now you will see ./traces/TestGenerator dir being created.
     >>> # If the generator call has an error, it will be logged in the error file generator_call.jsonl
-
+
     If you want to decorate a component (such as LLMRetriever) from the library where you do not have access to the source code, you can do it like this:
 
     .. code-block:: python
diff --git a/lightrag/tracing/generator_call_logger.py b/lightrag/lightrag/tracing/generator_call_logger.py
similarity index 100%
rename from lightrag/tracing/generator_call_logger.py
rename to lightrag/lightrag/tracing/generator_call_logger.py
diff --git a/lightrag/tracing/generator_state_logger.py b/lightrag/lightrag/tracing/generator_state_logger.py
similarity index 100%
rename from lightrag/tracing/generator_state_logger.py
rename to lightrag/lightrag/tracing/generator_state_logger.py
diff --git a/lightrag/utils/__init__.py b/lightrag/lightrag/utils/__init__.py
similarity index 94%
rename from lightrag/utils/__init__.py
rename to lightrag/lightrag/utils/__init__.py
index e190e2e5a..64f2865cd 100644
--- a/lightrag/utils/__init__.py
+++ b/lightrag/lightrag/utils/__init__.py
@@ -18,6 +18,7 @@
 from .registry import EntityMapping
 from .config import new_components_from_config, new_component
 from .lazy_import import LazyImport, OptionalPackages, safe_import
+from .setup_env import setup_env
 
 __all__ = [
@@ -42,4 +43,5 @@
     "append_to_jsonl",
     "write_list_to_jsonl",
     "safe_import",
+    "setup_env",
 ]
diff --git a/lightrag/utils/config.py b/lightrag/lightrag/utils/config.py
similarity index 100%
rename from lightrag/utils/config.py
rename to lightrag/lightrag/utils/config.py
diff --git a/lightrag/utils/file_io.py b/lightrag/lightrag/utils/file_io.py
similarity index 100%
rename from lightrag/utils/file_io.py
rename to lightrag/lightrag/utils/file_io.py
diff --git a/lightrag/utils/lazy_import.py b/lightrag/lightrag/utils/lazy_import.py
similarity index 100%
rename from lightrag/utils/lazy_import.py
rename to lightrag/lightrag/utils/lazy_import.py
diff --git a/lightrag/utils/logger.py b/lightrag/lightrag/utils/logger.py
similarity index 100%
rename from lightrag/utils/logger.py
rename to lightrag/lightrag/utils/logger.py
diff --git a/lightrag/utils/registry.py b/lightrag/lightrag/utils/registry.py
similarity index 96%
rename from lightrag/utils/registry.py
rename to lightrag/lightrag/utils/registry.py
index 0b74f120b..a89db7133 100644
--- a/lightrag/utils/registry.py
+++ b/lightrag/lightrag/utils/registry.py
@@ -4,7 +4,7 @@
 class EntityMapping:
     __doc__ = r"""A registry for entities: components, classes, and functions.
 
-    This can be used to configure classes, functions, or components in a registry. 
+    This can be used to configure classes, functions, or components in a registry.
""" _registry: Dict[str, Type] = {} diff --git a/lightrag/utils/serialization.py b/lightrag/lightrag/utils/serialization.py similarity index 100% rename from lightrag/utils/serialization.py rename to lightrag/lightrag/utils/serialization.py diff --git a/lightrag/lightrag/utils/setup_env.py b/lightrag/lightrag/utils/setup_env.py new file mode 100644 index 000000000..6255b0602 --- /dev/null +++ b/lightrag/lightrag/utils/setup_env.py @@ -0,0 +1,5 @@ +import dotenv + + +def setup_env(): + dotenv.load_dotenv(dotenv_path=".env", override=True) diff --git a/lightrag/poetry.lock b/lightrag/poetry.lock index 5c4b93945..9c1a98e2c 100644 --- a/lightrag/poetry.lock +++ b/lightrag/poetry.lock @@ -944,13 +944,13 @@ files = [ [[package]] name = "nvidia-nvjitlink-cu12" -version = "12.5.40" +version = "12.5.82" description = "Nvidia JIT LTO Library" optional = false python-versions = ">=3" files = [ - {file = "nvidia_nvjitlink_cu12-12.5.40-py3-none-manylinux2014_x86_64.whl", hash = "sha256:d9714f27c1d0f0895cd8915c07a87a1d0029a0aa36acaf9156952ec2a8a12189"}, - {file = "nvidia_nvjitlink_cu12-12.5.40-py3-none-win_amd64.whl", hash = "sha256:c3401dc8543b52d3a8158007a0c1ab4e9c768fcbd24153a48c86972102197ddd"}, + {file = "nvidia_nvjitlink_cu12-12.5.82-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f9b37bc5c8cf7509665cb6ada5aaa0ce65618f2332b7d3e78e9790511f111212"}, + {file = "nvidia_nvjitlink_cu12-12.5.82-py3-none-win_amd64.whl", hash = "sha256:e782564d705ff0bf61ac3e1bf730166da66dd2fe9012f111ede5fc49b64ae697"}, ] [[package]] @@ -1049,109 +1049,121 @@ virtualenv = ">=20.10.0" [[package]] name = "pydantic" -version = "2.7.4" +version = "2.8.0" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"}, - {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"}, + {file = "pydantic-2.8.0-py3-none-any.whl", hash = "sha256:ead4f3a1e92386a734ca1411cb25d94147cf8778ed5be6b56749047676d6364e"}, + {file = "pydantic-2.8.0.tar.gz", hash = "sha256:d970ffb9d030b710795878940bd0489842c638e7252fc4a19c3ae2f7da4d6141"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.18.4" -typing-extensions = ">=4.6.1" +pydantic-core = "2.20.0" +typing-extensions = [ + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, +] [package.extras] email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.18.4" +version = "2.20.0" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"}, - {file = "pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", 
hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"}, - {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"}, - {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"}, - {file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"}, - {file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash = "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"}, - {file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"}, - {file = "pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"}, - {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"}, - {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"}, - {file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"}, - {file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = "sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"}, - {file = "pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"}, - {file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"}, - {file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"}, - {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"}, - {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"}, - {file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = "sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"}, - {file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = "sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"}, - {file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = "sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"}, - {file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"}, - {file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"}, - {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"}, - {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"}, - {file = "pydantic_core-2.18.4-cp38-none-win32.whl", 
hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"}, - {file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"}, - {file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"}, - {file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"}, - {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"}, - {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"}, - {file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"}, - {file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = "sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", 
hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"}, - {file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"}, + {file = "pydantic_core-2.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e9dcd7fb34f7bfb239b5fa420033642fff0ad676b765559c3737b91f664d4fa9"}, + {file = "pydantic_core-2.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:649a764d9b0da29816889424697b2a3746963ad36d3e0968784ceed6e40c6355"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7701df088d0b05f3460f7ba15aec81ac8b0fb5690367dfd072a6c38cf5b7fdb5"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab760f17c3e792225cdaef31ca23c0aea45c14ce80d8eff62503f86a5ab76bff"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cb1ad5b4d73cde784cf64580166568074f5ccd2548d765e690546cff3d80937d"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b81ec2efc04fc1dbf400647d4357d64fb25543bae38d2d19787d69360aad21c9"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4a9732a5cad764ba37f3aa873dccb41b584f69c347a57323eda0930deec8e10"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6dc85b9e10cc21d9c1055f15684f76fa4facadddcb6cd63abab702eb93c98943"}, + {file = "pydantic_core-2.20.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:21d9f7e24f63fdc7118e6cc49defaab8c1d27570782f7e5256169d77498cf7c7"}, + {file = "pydantic_core-2.20.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8b315685832ab9287e6124b5d74fc12dda31e6421d7f6b08525791452844bc2d"}, + {file = "pydantic_core-2.20.0-cp310-none-win32.whl", hash = "sha256:c3dc8ec8b87c7ad534c75b8855168a08a7036fdb9deeeed5705ba9410721c84d"}, + {file = "pydantic_core-2.20.0-cp310-none-win_amd64.whl", hash = "sha256:85770b4b37bb36ef93a6122601795231225641003e0318d23c6233c59b424279"}, + {file = "pydantic_core-2.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:58e251bb5a5998f7226dc90b0b753eeffa720bd66664eba51927c2a7a2d5f32c"}, + {file = "pydantic_core-2.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:78d584caac52c24240ef9ecd75de64c760bbd0e20dbf6973631815e3ef16ef8b"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5084ec9721f82bef5ff7c4d1ee65e1626783abb585f8c0993833490b63fe1792"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6d0f52684868db7c218437d260e14d37948b094493f2646f22d3dda7229bbe3f"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1def125d59a87fe451212a72ab9ed34c118ff771e5473fef4f2f95d8ede26d75"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b34480fd6778ab356abf1e9086a4ced95002a1e195e8d2fd182b0def9d944d11"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d42669d319db366cb567c3b444f43caa7ffb779bf9530692c6f244fc635a41eb"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:53b06aea7a48919a254b32107647be9128c066aaa6ee6d5d08222325f25ef175"}, + {file = "pydantic_core-2.20.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1f038156b696a1c39d763b2080aeefa87ddb4162c10aa9fabfefffc3dd8180fa"}, + {file = "pydantic_core-2.20.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3f0f3a4a23717280a5ee3ac4fb1f81d6fde604c9ec5100f7f6f987716bb8c137"}, + {file = "pydantic_core-2.20.0-cp311-none-win32.whl", hash = "sha256:316fe7c3fec017affd916a0c83d6f1ec697cbbbdf1124769fa73328e7907cc2e"}, + {file = "pydantic_core-2.20.0-cp311-none-win_amd64.whl", hash = "sha256:2d06a7fa437f93782e3f32d739c3ec189f82fca74336c08255f9e20cea1ed378"}, + {file = "pydantic_core-2.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:d6f8c49657f3eb7720ed4c9b26624063da14937fc94d1812f1e04a2204db3e17"}, + {file = "pydantic_core-2.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ad1bd2f377f56fec11d5cfd0977c30061cd19f4fa199bf138b200ec0d5e27eeb"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed741183719a5271f97d93bbcc45ed64619fa38068aaa6e90027d1d17e30dc8d"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d82e5ed3a05f2dcb89c6ead2fd0dbff7ac09bc02c1b4028ece2d3a3854d049ce"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2ba34a099576234671f2e4274e5bc6813b22e28778c216d680eabd0db3f7dad"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:879ae6bb08a063b3e1b7ac8c860096d8fd6b48dd9b2690b7f2738b8c835e744b"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b0eefc7633a04c0694340aad91fbfd1986fe1a1e0c63a22793ba40a18fcbdc8"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:73deadd6fd8a23e2f40b412b3ac617a112143c8989a4fe265050fd91ba5c0608"}, + {file = "pydantic_core-2.20.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:35681445dc85446fb105943d81ae7569aa7e89de80d1ca4ac3229e05c311bdb1"}, + {file = "pydantic_core-2.20.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0f6dd3612a3b9f91f2e63924ea18a4476656c6d01843ca20a4c09e00422195af"}, + {file = "pydantic_core-2.20.0-cp312-none-win32.whl", hash = "sha256:7e37b6bb6e90c2b8412b06373c6978d9d81e7199a40e24a6ef480e8acdeaf918"}, + {file = 
"pydantic_core-2.20.0-cp312-none-win_amd64.whl", hash = "sha256:7d4df13d1c55e84351fab51383520b84f490740a9f1fec905362aa64590b7a5d"}, + {file = "pydantic_core-2.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:d43e7ab3b65e4dc35a7612cfff7b0fd62dce5bc11a7cd198310b57f39847fd6c"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b6a24d7b5893392f2b8e3b7a0031ae3b14c6c1942a4615f0d8794fdeeefb08b"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b2f13c3e955a087c3ec86f97661d9f72a76e221281b2262956af381224cfc243"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72432fd6e868c8d0a6849869e004b8bcae233a3c56383954c228316694920b38"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d70a8ff2d4953afb4cbe6211f17268ad29c0b47e73d3372f40e7775904bc28fc"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e49524917b8d3c2f42cd0d2df61178e08e50f5f029f9af1f402b3ee64574392"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4f0f71653b1c1bad0350bc0b4cc057ab87b438ff18fa6392533811ebd01439c"}, + {file = "pydantic_core-2.20.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:16197e6f4fdecb9892ed2436e507e44f0a1aa2cff3b9306d1c879ea2f9200997"}, + {file = "pydantic_core-2.20.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:763602504bf640b3ded3bba3f8ed8a1cc2fc6a87b8d55c1c5689f428c49c947e"}, + {file = "pydantic_core-2.20.0-cp313-none-win32.whl", hash = "sha256:a3f243f318bd9523277fa123b3163f4c005a3e8619d4b867064de02f287a564d"}, + {file = "pydantic_core-2.20.0-cp313-none-win_amd64.whl", hash = "sha256:03aceaf6a5adaad3bec2233edc5a7905026553916615888e53154807e404545c"}, + {file = "pydantic_core-2.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d6f2d8b8da1f03f577243b07bbdd3412eee3d37d1f2fd71d1513cbc76a8c1239"}, + {file = "pydantic_core-2.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a272785a226869416c6b3c1b7e450506152d3844207331f02f27173562c917e0"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efbb412d55a4ffe73963fed95c09ccb83647ec63b711c4b3752be10a56f0090b"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1e4f46189d8740561b43655263a41aac75ff0388febcb2c9ec4f1b60a0ec12f3"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87d3df115f4a3c8c5e4d5acf067d399c6466d7e604fc9ee9acbe6f0c88a0c3cf"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a340d2bdebe819d08f605e9705ed551c3feb97e4fd71822d7147c1e4bdbb9508"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:616b9c2f882393d422ba11b40e72382fe975e806ad693095e9a3b67c59ea6150"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:25c46bb2ff6084859bbcfdf4f1a63004b98e88b6d04053e8bf324e115398e9e7"}, + {file = "pydantic_core-2.20.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:23425eccef8f2c342f78d3a238c824623836c6c874d93c726673dbf7e56c78c0"}, + {file = "pydantic_core-2.20.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:52527e8f223ba29608d999d65b204676398009725007c9336651c2ec2d93cffc"}, + {file = "pydantic_core-2.20.0-cp38-none-win32.whl", hash = "sha256:1c3c5b7f70dd19a6845292b0775295ea81c61540f68671ae06bfe4421b3222c2"}, + {file = "pydantic_core-2.20.0-cp38-none-win_amd64.whl", hash = "sha256:8093473d7b9e908af1cef30025609afc8f5fd2a16ff07f97440fd911421e4432"}, + {file = "pydantic_core-2.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ee7785938e407418795e4399b2bf5b5f3cf6cf728077a7f26973220d58d885cf"}, + {file = "pydantic_core-2.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e75794883d635071cf6b4ed2a5d7a1e50672ab7a051454c76446ef1ebcdcc91"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:344e352c96e53b4f56b53d24728217c69399b8129c16789f70236083c6ceb2ac"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:978d4123ad1e605daf1ba5e01d4f235bcf7b6e340ef07e7122e8e9cfe3eb61ab"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c05eaf6c863781eb834ab41f5963604ab92855822a2062897958089d1335dad"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bc7e43b4a528ffca8c9151b6a2ca34482c2fdc05e6aa24a84b7f475c896fc51d"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:658287a29351166510ebbe0a75c373600cc4367a3d9337b964dada8d38bcc0f4"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1dacf660d6de692fe351e8c806e7efccf09ee5184865893afbe8e59be4920b4a"}, + {file = "pydantic_core-2.20.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3e147fc6e27b9a487320d78515c5f29798b539179f7777018cedf51b7749e4f4"}, + {file = "pydantic_core-2.20.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c867230d715a3dd1d962c8d9bef0d3168994ed663e21bf748b6e3a529a129aab"}, + {file = "pydantic_core-2.20.0-cp39-none-win32.whl", hash = "sha256:22b813baf0dbf612752d8143a2dbf8e33ccb850656b7850e009bad2e101fc377"}, + {file = "pydantic_core-2.20.0-cp39-none-win_amd64.whl", hash = "sha256:3a7235b46c1bbe201f09b6f0f5e6c36b16bad3d0532a10493742f91fbdc8035f"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cafde15a6f7feaec2f570646e2ffc5b73412295d29134a29067e70740ec6ee20"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2aec8eeea0b08fd6bc2213d8e86811a07491849fd3d79955b62d83e32fa2ad5f"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:840200827984f1c4e114008abc2f5ede362d6e11ed0b5931681884dd41852ff1"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8ea1d8b7df522e5ced34993c423c3bf3735c53df8b2a15688a2f03a7d678800"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d5b8376a867047bf08910573deb95d3c8dfb976eb014ee24f3b5a61ccc5bee1b"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d08264b4460326cefacc179fc1411304d5af388a79910832835e6f641512358b"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7a3639011c2e8a9628466f616ed7fb413f30032b891898e10895a0a8b5857d6c"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = 
"sha256:05e83ce2f7eba29e627dd8066aa6c4c0269b2d4f889c0eba157233a353053cea"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:603a843fea76a595c8f661cd4da4d2281dff1e38c4a836a928eac1a2f8fe88e4"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ac76f30d5d3454f4c28826d891fe74d25121a346c69523c9810ebba43f3b1cec"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e3b1d4b1b3f6082849f9b28427ef147a5b46a6132a3dbaf9ca1baa40c88609"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2761f71faed820e25ec62eacba670d1b5c2709bb131a19fcdbfbb09884593e5a"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a0586cddbf4380e24569b8a05f234e7305717cc8323f50114dfb2051fcbce2a3"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b8c46a8cf53e849eea7090f331ae2202cd0f1ceb090b00f5902c423bd1e11805"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b4a085bd04af7245e140d1b95619fe8abb445a3d7fdf219b3f80c940853268ef"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:116b326ac82c8b315e7348390f6d30bcfe6e688a7d3f1de50ff7bcc2042a23c2"}, + {file = "pydantic_core-2.20.0.tar.gz", hash = "sha256:366be8e64e0cb63d87cf79b4e1765c0703dd6313c729b22e7b9e378db6b96877"}, ] [package.dependencies] diff --git a/lightrag/pyproject.toml b/lightrag/pyproject.toml index 0e9af8d91..e350bb9f8 100644 --- a/lightrag/pyproject.toml +++ b/lightrag/pyproject.toml @@ -1,26 +1,36 @@ [tool.poetry] name = "lightrag" -packages = [ - { include = "core", from = "." }, - { include = "components", from = "." }, - { include = "eval", from = "." }, - { include = "utils", from = "." }, - { include = "tracing", from = "." }, - { include = "optim", from = "." }, - { include = "icl", from = "." }, -] -version = "0.1.0" +version = "0.0.0-alpha.6" description = "The 'PyTorch' library for LLM applications. RAG=Retriever-Agent-Generator." -authors = ["Li Yin "] +authors = ["Li Yin "] readme = "README.md" +repository = "https://github.com/SylphAI-Inc/LightRAG" + license = "MIT" +maintainers = ["Xiaoyi Gu ", "Li Yin "] classifiers = [ "Topic :: Software Development :: Build Tools", "Topic :: Software Development :: Libraries :: Python Modules", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Software Development :: Libraries :: Application Frameworks", +] +keywords = ["LLM", "NLP", "RAG", "devtools", "retrieval", "agent"] + +include = [ + "lightrag/core/*", + "lightrag/components/*", + "lightrag/eval/*", + "lightrag/utils/*", + "lightrag/tracing/*", + "lightrag/optim/*", + # "lightrag/icl/*", ] +packages = [{ include = "lightrag", from = "." 
}] + + [tool.poetry.dependencies] python = ">=3.10, <4.0" @@ -50,6 +60,7 @@ groq = "^0.5.0" # should only be installed if groq client is used faiss-cpu = "^1.8.0" sqlalchemy = "^2.0.30" + [tool.ruff] exclude = ["images"] diff --git a/lightrag/pytest.ini b/lightrag/pytest.ini index 70f8b7469..fa2e48573 100644 --- a/lightrag/pytest.ini +++ b/lightrag/pytest.ini @@ -4,4 +4,3 @@ python_files = test_*.py python_classes = Test* python_functions = test_* norecursedirs = *_test -addopts = --ignore=li_test/test_li_datasets.py --ignore=li_test/test_li_dspy.py --ignore=li_test/test_li_haystack.py --ignore=li_test/test_li_llamaindex_huggingface_llm.py --ignore=li_test/test_li_llamaindex_router.py --ignore=li_test/test_li_ollama.py --ignore=li_test/test_li_transformer.py --ignore=li_test/test_li_transformers_small_model.py diff --git a/lightrag/tests/test_generator.py b/lightrag/tests/test_generator.py index d5f0e0213..953e73d29 100644 --- a/lightrag/tests/test_generator.py +++ b/lightrag/tests/test_generator.py @@ -6,6 +6,7 @@ from lightrag.core.types import GeneratorOutput from lightrag.core.generator import Generator + from lightrag.core.model_client import ModelClient from lightrag.tracing import GeneratorStateLogger @@ -13,7 +14,9 @@ class TestGenerator(IsolatedAsyncioTestCase): def setUp(self): # Assuming that OpenAIClient is correctly mocked and passed to Generator - with patch("core.model_client.ModelClient", spec=ModelClient) as MockAPI: + with patch( + "lightrag.core.model_client.ModelClient", spec=ModelClient + ) as MockAPI: mock_api_client = Mock(ModelClient) MockAPI.return_value = mock_api_client mock_api_client.call.return_value = "Generated text response" diff --git a/lightrag/tests/test_text_splitter.py b/lightrag/tests/test_text_splitter.py index 98fbc5300..eb03a3de9 100644 --- a/lightrag/tests/test_text_splitter.py +++ b/lightrag/tests/test_text_splitter.py @@ -1,6 +1,7 @@ import unittest from lightrag.core.types import Document -from lightrag.components.data_process.text_splitter import TextSplitter +from lightrag.components.data_process.text_splitter import TextSplitter + class TestTextSplitter(unittest.TestCase): @@ -58,5 +59,6 @@ def test_document_splitting(self): # with self.assertRaises(ValueError): # self.splitter.call([Document(text=None, id="1")]) -if __name__ == '__main__': - unittest.main() \ No newline at end of file + +if __name__ == "__main__": + unittest.main() diff --git a/lightrag/tests/test_transformer_client.py b/lightrag/tests/test_transformer_client.py index cdbc1931d..78c5bdf24 100644 --- a/lightrag/tests/test_transformer_client.py +++ b/lightrag/tests/test_transformer_client.py @@ -1,13 +1,6 @@ import unittest import torch -from lightrag.components.model_client import ( - TransformersClient, - TransformerReranker, - TransformerLLM, - TransformerEmbedder, -) -from lightrag.core.types import ModelType # Set the number of threads for PyTorch, avoid segementation fault torch.set_num_threads(1) @@ -102,27 +95,25 @@ def setUp(self) -> None: # ) # self.assertEqual(type(output), tuple) - # def test_transformer_llm_response(self): # """Test the TransformerLLM model with zephyr-7b-beta for generating a response.""" # transformer_llm_model = "HuggingFaceH4/zephyr-7b-beta" # transformer_llm_model_component = TransformerLLM(model_name=transformer_llm_model) - + # # Define a sample input # input_text = "Hello, what's the weather today?" 
- + # # Test generating a response, providing the 'model' keyword # # response = transformer_llm_model_component(input=input_text, model=transformer_llm_model) # response = transformer_llm_model_component(input_text=input_text) - # # Check if the response is valid # self.assertIsInstance(response, str, "The response should be a string.") # self.assertTrue(len(response) > 0, "The response should not be empty.") - + # # Optionally, print the response for visual verification during testing # print(f"Generated response: {response}") - -if __name__ == '__main__': - unittest.main() \ No newline at end of file + +if __name__ == "__main__": + unittest.main() diff --git a/lightrag/utils/setup_env.py b/lightrag/utils/setup_env.py deleted file mode 100644 index 15fee2327..000000000 --- a/lightrag/utils/setup_env.py +++ /dev/null @@ -1,3 +0,0 @@ -import dotenv - -dotenv.load_dotenv(dotenv_path=".env", override=True) diff --git a/poetry.lock b/poetry.lock index db7926658..d0e981181 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1451,13 +1451,13 @@ uritemplate = ">=3.0.1,<5" [[package]] name = "google-auth" -version = "2.30.0" +version = "2.31.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google-auth-2.30.0.tar.gz", hash = "sha256:ab630a1320f6720909ad76a7dbdb6841cdf5c66b328d690027e4867bdfb16688"}, - {file = "google_auth-2.30.0-py2.py3-none-any.whl", hash = "sha256:8df7da660f62757388b8a7f249df13549b3373f24388cb5d2f1dd91cc18180b5"}, + {file = "google-auth-2.31.0.tar.gz", hash = "sha256:87805c36970047247c8afe614d4e3af8eceafc1ebba0c679fe75ddd1d575e871"}, + {file = "google_auth-2.31.0-py2.py3-none-any.whl", hash = "sha256:042c4702efa9f7d3c48d3a69341c209381b125faa6dbf3ebe56bc7e40ae05c23"}, ] [package.dependencies] @@ -1858,13 +1858,13 @@ files = [ [[package]] name = "ipykernel" -version = "6.29.4" +version = "6.29.5" description = "IPython Kernel for Jupyter" optional = false python-versions = ">=3.8" files = [ - {file = "ipykernel-6.29.4-py3-none-any.whl", hash = "sha256:1181e653d95c6808039c509ef8e67c4126b3b3af7781496c7cbfb5ed938a27da"}, - {file = "ipykernel-6.29.4.tar.gz", hash = "sha256:3d44070060f9475ac2092b760123fadf105d2e2493c24848b6691a7c4f42af5c"}, + {file = "ipykernel-6.29.5-py3-none-any.whl", hash = "sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5"}, + {file = "ipykernel-6.29.5.tar.gz", hash = "sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215"}, ] [package.dependencies] @@ -2717,7 +2717,7 @@ typing = ["mypy (>=1.0.0)", "types-setuptools"] [[package]] name = "lightrag" -version = "0.1.0" +version = "0.0.0-alpha.6" description = "The 'PyTorch' library for LLM applications. RAG=Retriever-Agent-Generator." 
optional = false python-versions = ">=3.10, <4.0" @@ -2860,13 +2860,13 @@ llama-index-core = ">=0.10.1,<0.11.0" [[package]] name = "llama-index-indices-managed-llama-cloud" -version = "0.2.1" +version = "0.2.2" description = "llama-index indices llama-cloud integration" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "llama_index_indices_managed_llama_cloud-0.2.1-py3-none-any.whl", hash = "sha256:69abd37bc7b57abcea841eea2a89cb0adee29bce3fd05c61e3082ae50f047b87"}, - {file = "llama_index_indices_managed_llama_cloud-0.2.1.tar.gz", hash = "sha256:b07fa606f1085e22918d2d45e00ab86f3430f36057e115322bd360b695eef565"}, + {file = "llama_index_indices_managed_llama_cloud-0.2.2-py3-none-any.whl", hash = "sha256:30c73a77fc54fa83c4a183fcdc3b5138a6b709a6fefc9539d0cb0c6315b0f2fc"}, + {file = "llama_index_indices_managed_llama_cloud-0.2.2.tar.gz", hash = "sha256:9a3db075878bc7adf798a74ec4d6220dec5421f46c0675702a94894934d17a7a"}, ] [package.dependencies] @@ -3028,13 +3028,13 @@ sqlalchemy = {version = ">=1.4.49,<2.1", extras = ["asyncio"]} [[package]] name = "llama-parse" -version = "0.4.4" +version = "0.4.5" description = "Parse files into RAG-Optimized formats." optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "llama_parse-0.4.4-py3-none-any.whl", hash = "sha256:bb9724d04fd31ed037000896c7cef7fcb9051325497db4592a15f8144754cd00"}, - {file = "llama_parse-0.4.4.tar.gz", hash = "sha256:b45c2db33a0d6b7a2d5f59e3d0ec7ee7f8227a852eaa56b04aa12b12f2c0d521"}, + {file = "llama_parse-0.4.5-py3-none-any.whl", hash = "sha256:a68fc91a2b0bce98a4960b8f709ca3c2f90b421da66e0d8522f0ea45b78846b9"}, + {file = "llama_parse-0.4.5.tar.gz", hash = "sha256:08a48bcf4af5b623bf26fa6266038572b95409f7be64746067db8d38f6927fe5"}, ] [package.dependencies] @@ -3745,13 +3745,13 @@ files = [ [[package]] name = "nvidia-nvjitlink-cu12" -version = "12.5.40" +version = "12.5.82" description = "Nvidia JIT LTO Library" optional = false python-versions = ">=3" files = [ - {file = "nvidia_nvjitlink_cu12-12.5.40-py3-none-manylinux2014_x86_64.whl", hash = "sha256:d9714f27c1d0f0895cd8915c07a87a1d0029a0aa36acaf9156952ec2a8a12189"}, - {file = "nvidia_nvjitlink_cu12-12.5.40-py3-none-win_amd64.whl", hash = "sha256:c3401dc8543b52d3a8158007a0c1ab4e9c768fcbd24153a48c86972102197ddd"}, + {file = "nvidia_nvjitlink_cu12-12.5.82-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f9b37bc5c8cf7509665cb6ada5aaa0ce65618f2332b7d3e78e9790511f111212"}, + {file = "nvidia_nvjitlink_cu12-12.5.82-py3-none-win_amd64.whl", hash = "sha256:e782564d705ff0bf61ac3e1bf730166da66dd2fe9012f111ede5fc49b64ae697"}, ] [[package]] @@ -4020,84 +4020,95 @@ numpy = "*" [[package]] name = "pillow" -version = "10.3.0" +version = "10.4.0" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.8" files = [ - {file = "pillow-10.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45"}, - {file = "pillow-10.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = 
"sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf"}, - {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3"}, - {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5"}, - {file = "pillow-10.3.0-cp310-cp310-win32.whl", hash = "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2"}, - {file = "pillow-10.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f"}, - {file = "pillow-10.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b"}, - {file = "pillow-10.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795"}, - {file = "pillow-10.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd"}, - {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad"}, - {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c"}, - {file = "pillow-10.3.0-cp311-cp311-win32.whl", hash = "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09"}, - {file = "pillow-10.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d"}, - {file = "pillow-10.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f"}, - {file = "pillow-10.3.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84"}, - {file = "pillow-10.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a"}, - {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef"}, - {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3"}, - {file = "pillow-10.3.0-cp312-cp312-win32.whl", hash = "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d"}, - {file = "pillow-10.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b"}, - {file = "pillow-10.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a"}, - {file = "pillow-10.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b"}, - {file = "pillow-10.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd"}, - {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d"}, - {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3"}, - {file = "pillow-10.3.0-cp38-cp38-win32.whl", hash = "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b"}, - {file = "pillow-10.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999"}, - {file = "pillow-10.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936"}, - {file = "pillow-10.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8"}, - {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9"}, - {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb"}, - {file = "pillow-10.3.0-cp39-cp39-win32.whl", hash = "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572"}, - {file = "pillow-10.3.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb"}, - {file = "pillow-10.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591"}, - {file = "pillow-10.3.0.tar.gz", hash = "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d"}, + {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, + {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, + {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, + {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, + {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, + {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, + {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, + {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, + {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = 
"sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, + {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, + {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, + {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, + {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, + {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"}, + {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"}, + {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = 
"sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"}, + {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"}, + {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"}, + {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"}, + {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, ] [package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] fpx = ["olefile"] mic = ["olefile"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] @@ -4465,109 +4476,121 @@ files = [ [[package]] name = "pydantic" -version = "2.7.4" +version = "2.8.0" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"}, - {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"}, + {file = "pydantic-2.8.0-py3-none-any.whl", hash = "sha256:ead4f3a1e92386a734ca1411cb25d94147cf8778ed5be6b56749047676d6364e"}, + {file = "pydantic-2.8.0.tar.gz", hash = "sha256:d970ffb9d030b710795878940bd0489842c638e7252fc4a19c3ae2f7da4d6141"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.18.4" -typing-extensions = ">=4.6.1" +pydantic-core = "2.20.0" +typing-extensions = [ + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, +] [package.extras] email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.18.4" +version = "2.20.0" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"}, - {file = "pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"}, - {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"}, - {file = 
"pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"}, - {file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"}, - {file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash = "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"}, - {file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"}, - {file = "pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"}, - {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"}, - {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"}, - {file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"}, - {file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = "sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"}, - {file = "pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"}, - {file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"}, - {file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"}, - {file = 
"pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"}, - {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"}, - {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"}, - {file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = "sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"}, - {file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = "sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"}, - {file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = "sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"}, - {file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"}, - {file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"}, - {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"}, - {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"}, - {file = "pydantic_core-2.18.4-cp38-none-win32.whl", hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"}, - {file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"}, - {file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"}, - {file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", 
hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"}, - {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"}, - {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"}, - {file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"}, - {file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = "sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"}, - {file = 
"pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"}, - {file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"}, + {file = "pydantic_core-2.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e9dcd7fb34f7bfb239b5fa420033642fff0ad676b765559c3737b91f664d4fa9"}, + {file = "pydantic_core-2.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:649a764d9b0da29816889424697b2a3746963ad36d3e0968784ceed6e40c6355"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7701df088d0b05f3460f7ba15aec81ac8b0fb5690367dfd072a6c38cf5b7fdb5"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab760f17c3e792225cdaef31ca23c0aea45c14ce80d8eff62503f86a5ab76bff"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cb1ad5b4d73cde784cf64580166568074f5ccd2548d765e690546cff3d80937d"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b81ec2efc04fc1dbf400647d4357d64fb25543bae38d2d19787d69360aad21c9"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4a9732a5cad764ba37f3aa873dccb41b584f69c347a57323eda0930deec8e10"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6dc85b9e10cc21d9c1055f15684f76fa4facadddcb6cd63abab702eb93c98943"}, + {file = "pydantic_core-2.20.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:21d9f7e24f63fdc7118e6cc49defaab8c1d27570782f7e5256169d77498cf7c7"}, + {file = "pydantic_core-2.20.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8b315685832ab9287e6124b5d74fc12dda31e6421d7f6b08525791452844bc2d"}, + {file = "pydantic_core-2.20.0-cp310-none-win32.whl", hash = "sha256:c3dc8ec8b87c7ad534c75b8855168a08a7036fdb9deeeed5705ba9410721c84d"}, + {file = "pydantic_core-2.20.0-cp310-none-win_amd64.whl", hash = "sha256:85770b4b37bb36ef93a6122601795231225641003e0318d23c6233c59b424279"}, + {file = "pydantic_core-2.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:58e251bb5a5998f7226dc90b0b753eeffa720bd66664eba51927c2a7a2d5f32c"}, + {file = "pydantic_core-2.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:78d584caac52c24240ef9ecd75de64c760bbd0e20dbf6973631815e3ef16ef8b"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5084ec9721f82bef5ff7c4d1ee65e1626783abb585f8c0993833490b63fe1792"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6d0f52684868db7c218437d260e14d37948b094493f2646f22d3dda7229bbe3f"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1def125d59a87fe451212a72ab9ed34c118ff771e5473fef4f2f95d8ede26d75"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b34480fd6778ab356abf1e9086a4ced95002a1e195e8d2fd182b0def9d944d11"}, + {file = 
"pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d42669d319db366cb567c3b444f43caa7ffb779bf9530692c6f244fc635a41eb"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:53b06aea7a48919a254b32107647be9128c066aaa6ee6d5d08222325f25ef175"}, + {file = "pydantic_core-2.20.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1f038156b696a1c39d763b2080aeefa87ddb4162c10aa9fabfefffc3dd8180fa"}, + {file = "pydantic_core-2.20.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3f0f3a4a23717280a5ee3ac4fb1f81d6fde604c9ec5100f7f6f987716bb8c137"}, + {file = "pydantic_core-2.20.0-cp311-none-win32.whl", hash = "sha256:316fe7c3fec017affd916a0c83d6f1ec697cbbbdf1124769fa73328e7907cc2e"}, + {file = "pydantic_core-2.20.0-cp311-none-win_amd64.whl", hash = "sha256:2d06a7fa437f93782e3f32d739c3ec189f82fca74336c08255f9e20cea1ed378"}, + {file = "pydantic_core-2.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:d6f8c49657f3eb7720ed4c9b26624063da14937fc94d1812f1e04a2204db3e17"}, + {file = "pydantic_core-2.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ad1bd2f377f56fec11d5cfd0977c30061cd19f4fa199bf138b200ec0d5e27eeb"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed741183719a5271f97d93bbcc45ed64619fa38068aaa6e90027d1d17e30dc8d"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d82e5ed3a05f2dcb89c6ead2fd0dbff7ac09bc02c1b4028ece2d3a3854d049ce"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2ba34a099576234671f2e4274e5bc6813b22e28778c216d680eabd0db3f7dad"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:879ae6bb08a063b3e1b7ac8c860096d8fd6b48dd9b2690b7f2738b8c835e744b"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b0eefc7633a04c0694340aad91fbfd1986fe1a1e0c63a22793ba40a18fcbdc8"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:73deadd6fd8a23e2f40b412b3ac617a112143c8989a4fe265050fd91ba5c0608"}, + {file = "pydantic_core-2.20.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:35681445dc85446fb105943d81ae7569aa7e89de80d1ca4ac3229e05c311bdb1"}, + {file = "pydantic_core-2.20.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0f6dd3612a3b9f91f2e63924ea18a4476656c6d01843ca20a4c09e00422195af"}, + {file = "pydantic_core-2.20.0-cp312-none-win32.whl", hash = "sha256:7e37b6bb6e90c2b8412b06373c6978d9d81e7199a40e24a6ef480e8acdeaf918"}, + {file = "pydantic_core-2.20.0-cp312-none-win_amd64.whl", hash = "sha256:7d4df13d1c55e84351fab51383520b84f490740a9f1fec905362aa64590b7a5d"}, + {file = "pydantic_core-2.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:d43e7ab3b65e4dc35a7612cfff7b0fd62dce5bc11a7cd198310b57f39847fd6c"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b6a24d7b5893392f2b8e3b7a0031ae3b14c6c1942a4615f0d8794fdeeefb08b"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b2f13c3e955a087c3ec86f97661d9f72a76e221281b2262956af381224cfc243"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72432fd6e868c8d0a6849869e004b8bcae233a3c56383954c228316694920b38"}, + 
{file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d70a8ff2d4953afb4cbe6211f17268ad29c0b47e73d3372f40e7775904bc28fc"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e49524917b8d3c2f42cd0d2df61178e08e50f5f029f9af1f402b3ee64574392"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4f0f71653b1c1bad0350bc0b4cc057ab87b438ff18fa6392533811ebd01439c"}, + {file = "pydantic_core-2.20.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:16197e6f4fdecb9892ed2436e507e44f0a1aa2cff3b9306d1c879ea2f9200997"}, + {file = "pydantic_core-2.20.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:763602504bf640b3ded3bba3f8ed8a1cc2fc6a87b8d55c1c5689f428c49c947e"}, + {file = "pydantic_core-2.20.0-cp313-none-win32.whl", hash = "sha256:a3f243f318bd9523277fa123b3163f4c005a3e8619d4b867064de02f287a564d"}, + {file = "pydantic_core-2.20.0-cp313-none-win_amd64.whl", hash = "sha256:03aceaf6a5adaad3bec2233edc5a7905026553916615888e53154807e404545c"}, + {file = "pydantic_core-2.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d6f2d8b8da1f03f577243b07bbdd3412eee3d37d1f2fd71d1513cbc76a8c1239"}, + {file = "pydantic_core-2.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a272785a226869416c6b3c1b7e450506152d3844207331f02f27173562c917e0"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efbb412d55a4ffe73963fed95c09ccb83647ec63b711c4b3752be10a56f0090b"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1e4f46189d8740561b43655263a41aac75ff0388febcb2c9ec4f1b60a0ec12f3"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87d3df115f4a3c8c5e4d5acf067d399c6466d7e604fc9ee9acbe6f0c88a0c3cf"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a340d2bdebe819d08f605e9705ed551c3feb97e4fd71822d7147c1e4bdbb9508"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:616b9c2f882393d422ba11b40e72382fe975e806ad693095e9a3b67c59ea6150"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:25c46bb2ff6084859bbcfdf4f1a63004b98e88b6d04053e8bf324e115398e9e7"}, + {file = "pydantic_core-2.20.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:23425eccef8f2c342f78d3a238c824623836c6c874d93c726673dbf7e56c78c0"}, + {file = "pydantic_core-2.20.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:52527e8f223ba29608d999d65b204676398009725007c9336651c2ec2d93cffc"}, + {file = "pydantic_core-2.20.0-cp38-none-win32.whl", hash = "sha256:1c3c5b7f70dd19a6845292b0775295ea81c61540f68671ae06bfe4421b3222c2"}, + {file = "pydantic_core-2.20.0-cp38-none-win_amd64.whl", hash = "sha256:8093473d7b9e908af1cef30025609afc8f5fd2a16ff07f97440fd911421e4432"}, + {file = "pydantic_core-2.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ee7785938e407418795e4399b2bf5b5f3cf6cf728077a7f26973220d58d885cf"}, + {file = "pydantic_core-2.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e75794883d635071cf6b4ed2a5d7a1e50672ab7a051454c76446ef1ebcdcc91"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:344e352c96e53b4f56b53d24728217c69399b8129c16789f70236083c6ceb2ac"}, + {file = 
"pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:978d4123ad1e605daf1ba5e01d4f235bcf7b6e340ef07e7122e8e9cfe3eb61ab"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c05eaf6c863781eb834ab41f5963604ab92855822a2062897958089d1335dad"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bc7e43b4a528ffca8c9151b6a2ca34482c2fdc05e6aa24a84b7f475c896fc51d"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:658287a29351166510ebbe0a75c373600cc4367a3d9337b964dada8d38bcc0f4"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1dacf660d6de692fe351e8c806e7efccf09ee5184865893afbe8e59be4920b4a"}, + {file = "pydantic_core-2.20.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3e147fc6e27b9a487320d78515c5f29798b539179f7777018cedf51b7749e4f4"}, + {file = "pydantic_core-2.20.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c867230d715a3dd1d962c8d9bef0d3168994ed663e21bf748b6e3a529a129aab"}, + {file = "pydantic_core-2.20.0-cp39-none-win32.whl", hash = "sha256:22b813baf0dbf612752d8143a2dbf8e33ccb850656b7850e009bad2e101fc377"}, + {file = "pydantic_core-2.20.0-cp39-none-win_amd64.whl", hash = "sha256:3a7235b46c1bbe201f09b6f0f5e6c36b16bad3d0532a10493742f91fbdc8035f"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cafde15a6f7feaec2f570646e2ffc5b73412295d29134a29067e70740ec6ee20"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2aec8eeea0b08fd6bc2213d8e86811a07491849fd3d79955b62d83e32fa2ad5f"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:840200827984f1c4e114008abc2f5ede362d6e11ed0b5931681884dd41852ff1"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8ea1d8b7df522e5ced34993c423c3bf3735c53df8b2a15688a2f03a7d678800"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d5b8376a867047bf08910573deb95d3c8dfb976eb014ee24f3b5a61ccc5bee1b"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d08264b4460326cefacc179fc1411304d5af388a79910832835e6f641512358b"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7a3639011c2e8a9628466f616ed7fb413f30032b891898e10895a0a8b5857d6c"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:05e83ce2f7eba29e627dd8066aa6c4c0269b2d4f889c0eba157233a353053cea"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:603a843fea76a595c8f661cd4da4d2281dff1e38c4a836a928eac1a2f8fe88e4"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ac76f30d5d3454f4c28826d891fe74d25121a346c69523c9810ebba43f3b1cec"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e3b1d4b1b3f6082849f9b28427ef147a5b46a6132a3dbaf9ca1baa40c88609"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2761f71faed820e25ec62eacba670d1b5c2709bb131a19fcdbfbb09884593e5a"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:a0586cddbf4380e24569b8a05f234e7305717cc8323f50114dfb2051fcbce2a3"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b8c46a8cf53e849eea7090f331ae2202cd0f1ceb090b00f5902c423bd1e11805"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b4a085bd04af7245e140d1b95619fe8abb445a3d7fdf219b3f80c940853268ef"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:116b326ac82c8b315e7348390f6d30bcfe6e688a7d3f1de50ff7bcc2042a23c2"}, + {file = "pydantic_core-2.20.0.tar.gz", hash = "sha256:366be8e64e0cb63d87cf79b4e1765c0703dd6313c729b22e7b9e378db6b96877"}, ] [package.dependencies] @@ -5540,18 +5563,18 @@ train = ["accelerate (>=0.20.3)", "datasets"] [[package]] name = "setuptools" -version = "70.1.1" +version = "70.2.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-70.1.1-py3-none-any.whl", hash = "sha256:a58a8fde0541dab0419750bcc521fbdf8585f6e5cb41909df3a472ef7b81ca95"}, - {file = "setuptools-70.1.1.tar.gz", hash = "sha256:937a48c7cdb7a21eb53cd7f9b59e525503aa8abaf3584c730dc5f7a5bec3a650"}, + {file = "setuptools-70.2.0-py3-none-any.whl", hash = "sha256:b8b8060bb426838fbe942479c90296ce976249451118ef566a5a0b7d8b78fb05"}, + {file = "setuptools-70.2.0.tar.gz", hash = "sha256:bd63e505105011b25c3c11f753f7e3b8465ea739efddaccef8f0efac2137bac1"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.10.0)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.10.0)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "six" @@ -6864,4 +6887,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = ">=3.11, <4.0" -content-hash = "0be4e6e956c9ecb269263f17c19dfa5506ae361a3a999c8e11fdae1ad46b3bcf" +content-hash = 
"91393e3f434457dd547937bf08aa22746cbd99ca66c940e9bc57ac71178b0432" diff --git a/pyproject.toml b/pyproject.toml index e2b148d06..e21433ab1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,14 +5,11 @@ version = "0.1.0" description = "A project to develop and test the lightrag library" authors = ["Your Name "] license = "MIT" - -packages = [ - { include = "lightrag", from = "." } -] +packages = [{ from = "_lightrag", include = "lightrag" }] # empty packages list [tool.poetry.dependencies] python = ">=3.11, <4.0" -lightrag = { path = "./lightrag", develop = true } +lightrag = { path = "lightrag", develop = true } torch = "^2.3.1" flagembedding = "^1.2.10" # cohere = "^5.5.7" diff --git a/use_cases/__init__.py b/use_cases/__init__.py index e69de29bb..d33bab7c7 100644 --- a/use_cases/__init__.py +++ b/use_cases/__init__.py @@ -0,0 +1,3 @@ +from lightrag.utils import setup_env + +setup_env() diff --git a/use_cases/classification/__init__.py b/use_cases/classification/__init__.py new file mode 100644 index 000000000..d33bab7c7 --- /dev/null +++ b/use_cases/classification/__init__.py @@ -0,0 +1,3 @@ +from lightrag.utils import setup_env + +setup_env() diff --git a/use_cases/classification/task.py b/use_cases/classification/task.py index 8d9d97600..79e5754e0 100644 --- a/use_cases/classification/task.py +++ b/use_cases/classification/task.py @@ -1,23 +1,17 @@ from typing import Dict, Any -from dataclasses import dataclass, field +from dataclasses import field import os -from lightrag.utils import setup_env import re -import logging from lightrag.core.component import Component, Sequential, fun_to_component -from lightrag.core.generator import Generator, GeneratorOutput +from lightrag.core.generator import Generator from lightrag.components.model_client import ( GroqAPIClient, - OpenAIClient, - GoogleGenAIClient, - AnthropicAPIClient, ) from lightrag.core.prompt_builder import Prompt from lightrag.components.output_parsers import YamlOutputParser -from lightrag.core.string_parser import JsonParser from lightrag.tracing import trace_generator_states, trace_generator_call @@ -28,7 +22,6 @@ from lightrag.core.base_data_class import DataClass -from use_cases.classification.data import _COARSE_LABELS_DESC, _COARSE_LABELS from use_cases.classification.utils import get_script_dir from use_cases.classification.config_log import log @@ -100,6 +93,27 @@ def get_tracing_path(): return os.path.join(get_script_dir(), "traces") +openai_model_kwargs = { + "model": "gpt-3.5-turbo", + "temperature": 0.0, + "top_p": 1, + "frequency_penalty": 0, + "presence_penalty": 0, + "n": 1, +} # noqa: F841 +google_model_kwargs = { + "model": "gemini-1.5-pro-latest", + "temperature": 0.0, + "top_p": 1, +} # noqa: F841 +anthropic_model_kwargs = { + "model": "claude-3-opus-20240229", + "temperature": 0.0, + "top_p": 1, + "max_tokens": 1024, +} # noqa: F841 + + @trace_generator_states(save_dir=get_tracing_path()) @trace_generator_call(save_dir=get_tracing_path(), error_only=True) class TRECClassifier(Component): @@ -154,25 +168,6 @@ def __init__( "presence_penalty": 0, "n": 1, } - openai_model_kwargs = { - "model": "gpt-3.5-turbo", - "temperature": 0.0, - "top_p": 1, - "frequency_penalty": 0, - "presence_penalty": 0, - "n": 1, - } - google_model_kwargs = { - "model": "gemini-1.5-pro-latest", - "temperature": 0.0, - "top_p": 1, - } - anthropic_model_kwargs = { - "model": "claude-3-opus-20240229", - "temperature": 0.0, - "top_p": 1, - "max_tokens": 1024, - } @fun_to_component def format_class_label(x: Dict[str, Any]) -> int: 
diff --git a/use_cases/llm_as_retriever.py b/use_cases/llm_as_retriever.py
index 451450886..81a44229b 100644
--- a/use_cases/llm_as_retriever.py
+++ b/use_cases/llm_as_retriever.py
@@ -3,8 +3,6 @@
 from lightrag.components.retriever import LLMRetriever
 from lightrag.components.model_client import OpenAIClient

-import utils.setup_env
-

 def test_llm_retriever():
     # TODO: directly passing the Generator class is more intuitive than the generator_kwargs
diff --git a/use_cases/rag_hotpotqa.py b/use_cases/rag_hotpotqa.py
index 1ef6a0362..3f651a5da 100644
--- a/use_cases/rag_hotpotqa.py
+++ b/use_cases/rag_hotpotqa.py
@@ -8,15 +8,13 @@
 from lightrag.core.types import Document
 from lightrag.core.string_parser import JsonParser
-from lightrag.core.component import Sequential, Component
+from lightrag.core.component import Component

 from lightrag.eval import (
     RetrieverRecall,
     RetrieverRelevance,
     AnswerMatchAcc,
     LLMasJudge,
-    DEFAULT_LLM_EVALUATOR_PROMPT,
 )
-from lightrag.core.prompt_builder import Prompt

 from use_cases.rag import RAG
diff --git a/use_cases/simple_qa_anthropic.py b/use_cases/simple_qa_anthropic.py
index 2c961d146..632733545 100644
--- a/use_cases/simple_qa_anthropic.py
+++ b/use_cases/simple_qa_anthropic.py
@@ -7,8 +7,6 @@

 from lightrag.components.model_client import AnthropicAPIClient

-import lightrag.utils.setup_env
-

 class SimpleQA(Component):
     def __init__(self):
diff --git a/use_cases/simple_qa_google.py b/use_cases/simple_qa_google.py
index bc87080cf..c672f079b 100644
--- a/use_cases/simple_qa_google.py
+++ b/use_cases/simple_qa_google.py
@@ -7,8 +7,6 @@

 from lightrag.components.model_client import GoogleGenAIClient

-import utils.setup_env
-

 class SimpleQA(Component):
     def __init__(self):
diff --git a/use_cases/simple_qa_groq.py b/use_cases/simple_qa_groq.py
index 31fe3120b..ce38fa818 100644
--- a/use_cases/simple_qa_groq.py
+++ b/use_cases/simple_qa_groq.py
@@ -3,8 +3,6 @@

 from lightrag.components.model_client import GroqAPIClient

-import utils.setup_env
-

 class SimpleQA(Component):
     def __init__(self):
diff --git a/use_cases/simple_qa_memory.py b/use_cases/simple_qa_memory.py
index 3cd2ca75b..ee9464c45 100644
--- a/use_cases/simple_qa_memory.py
+++ b/use_cases/simple_qa_memory.py
@@ -2,60 +2,56 @@
 We just need a very basic generator that can be used to generate text from a prompt.
 """

-from lightrag.core.generator import Generator
-from lightrag.core.component import Component
-from lightrag.core.memory import Memory
-
-from lightrag.components.model_client import OpenAIClient
-
-
-import utils.setup_env
-
-
-class SimpleDialog(Component):
-    def __init__(self):
-        super().__init__()
-        model_kwargs = {"model": "gpt-3.5-turbo"}
-        task_desc_str = "You are a helpful assistant."
-        self.generator = Generator(
-            model_client=OpenAIClient(),
-            model_kwargs=model_kwargs,
-            preset_prompt_kwargs={"task_desc_str": task_desc_str},
-        )
-        self.chat_history = Memory()
-        self.generator.print_prompt()
-
-    def chat(self) -> str:
-        print("Welcome to SimpleQA. You can ask any question. Type 'exit' to end.")
-        while True:
-            user_input = input("You: ")
-            #
-            if user_input.lower() == "exit":
-                print("Goodbye!")
-                break
-            chat_history_str = self.chat_history()
-            response = self.generator(
-                prompt_kwargs={
-                    "chat_history_str": chat_history_str,
-                    "input": user_input,
-                },
-            )
-            # save the user input and response to the memory
-            self.chat_history.add_dialog_turn(
-                user_query=user_input, assistant_response=response
-            )
-            """
-            Most components must have a __call__ method in order to be chained together with other components in the data pipeline.
-            From the memory-management side, it is difficult to just chain them together.
-            This is similar to retrieval. This additional step is to manage the external db, like
-            data injection. Retrieving can be chained, such as using self.chat_history() to get the chat history.
-            """
-            print(f"Assistant: {response}")
-
-    # a class to have multiple turns and take user input
-
-
-if __name__ == "__main__":
-    simple_qa = SimpleDialog()
-    print(simple_qa)
-    print(simple_qa.chat())
+# from lightrag.core.component import Component
+# from lightrag.core.memory import Memory
+
+# from lightrag.components.model_client import OpenAIClient
+
+
+# class SimpleDialog(Component):
+#     def __init__(self):
+#         super().__init__()
+#         model_kwargs = {"model": "gpt-3.5-turbo"}
+#         task_desc_str = "You are a helpful assistant."
+#         self.generator = Generator(
+#             model_client=OpenAIClient(),
+#             model_kwargs=model_kwargs,
+#             preset_prompt_kwargs={"task_desc_str": task_desc_str},
+#         )
+#         self.chat_history = Memory()
+#         self.generator.print_prompt()
+
+#     def chat(self) -> str:
+#         print("Welcome to SimpleQA. You can ask any question. Type 'exit' to end.")
+#         while True:
+#             user_input = input("You: ")
+#             #
+#             if user_input.lower() == "exit":
+#                 print("Goodbye!")
+#                 break
+#             chat_history_str = self.chat_history()
+#             response = self.generator(
+#                 prompt_kwargs={
+#                     "chat_history_str": chat_history_str,
+#                     "input": user_input,
+#                 },
+#             )
+#             # save the user input and response to the memory
+#             self.chat_history.add_dialog_turn(
+#                 user_query=user_input, assistant_response=response
+#             )
+#             """
+#             Most components must have a __call__ method in order to be chained together with other components in the data pipeline.
+#             From the memory-management side, it is difficult to just chain them together.
+#             This is similar to retrieval. This additional step is to manage the external db, like
+#             data injection. Retrieving can be chained, such as using self.chat_history() to get the chat history.
+#             """
+#             print(f"Assistant: {response}")
+
+#     # a class to have multiple turns and take user input
+
+
+# if __name__ == "__main__":
+#     simple_qa = SimpleDialog()
+#     print(simple_qa)
+#     print(simple_qa.chat())
diff --git a/use_cases/simple_qa_trainable.py b/use_cases/simple_qa_trainable.py
index 7e4523e71..e44f805c9 100644
--- a/use_cases/simple_qa_trainable.py
+++ b/use_cases/simple_qa_trainable.py
@@ -3,8 +3,6 @@

 from lightrag.components.model_client import GroqAPIClient

-import utils.setup_env
-

 class SimpleQA(Component):
     def __init__(self):
diff --git a/use_cases/simple_rag.py b/use_cases/simple_rag.py
index c9bda1894..e45eba179 100644
--- a/use_cases/simple_rag.py
+++ b/use_cases/simple_rag.py
@@ -17,7 +17,6 @@
     ToEmbeddings,
     DocumentSplitter,
 )
-from lightrag.utils import setup_env  # noqa

 # TODO: RAG can potentially be a component itself and be provided to the users
diff --git a/use_cases/simple_rag_bm_25.py b/use_cases/simple_rag_bm_25.py
index ac791e0d7..ba692b1d5 100644
--- a/use_cases/simple_rag_bm_25.py
+++ b/use_cases/simple_rag_bm_25.py
@@ -13,8 +13,6 @@
 from lightrag.components.retriever import BM25Retriever
 from lightrag.components.model_client import OpenAIClient

-import utils.setup_env  # noqa
-

 # TODO: RAG can potentially be a component itself and be provided to the users
 class RAG(Component):
diff --git a/use_cases/use_embedder.py b/use_cases/use_embedder.py
index 1498ce6f4..04600e726 100644
--- a/use_cases/use_embedder.py
+++ b/use_cases/use_embedder.py
@@ -5,8 +5,6 @@

 from lightrag.core.component import Component
 from lightrag.components.model_client import OpenAIClient

-import utils.setup_env
-

 class SimpleEmbedder(Component):
     """
@@ -50,6 +48,7 @@ async def main():
     start_time = time.time()

     results = await asyncio.gather(*tasks)
+    print(results)
     end_time = time.time()

     print(f"Total time for 10 async calls: {end_time - start_time} seconds")
diff --git a/use_cases/yaml_output.py b/use_cases/yaml_output.py
index 65a3b1e6a..1fd2b9c64 100644
--- a/use_cases/yaml_output.py
+++ b/use_cases/yaml_output.py
@@ -1,14 +1,12 @@
 from lightrag.core.component import Component
 from lightrag.core.generator import Generator
-from lightrag.components.model_client import GroqAPIClient, OpenAIClient
-from lightrag.components.output_parsers import YamlOutputParser, ListOutputParser
+from lightrag.components.model_client import GroqAPIClient
+from lightrag.components.output_parsers import YamlOutputParser

 from lightrag.core.base_data_class import DataClass, field
 from lightrag.core.types import GeneratorOutput
-from lightrag.utils import setup_env
-

 class JokeOutput(DataClass):
     setup: str = field(metadata={"desc": "question to set up a joke"}, default="")