diff --git a/.github/workflows/gradient.yml b/.github/workflows/gradient.yml new file mode 100644 index 000000000..f717ba2c9 --- /dev/null +++ b/.github/workflows/gradient.yml @@ -0,0 +1,56 @@ +# This workflow comes from https://github.com/ofek/hatch-mypyc +# https://github.com/ofek/hatch-mypyc/blob/5a198c0ba8660494d02716cfc9d79ce4adfb1442/.github/workflows/test.yml +name: Test / gradient + +on: + schedule: + - cron: "0 0 * * *" + pull_request: + paths: + - 'integrations/gradient/**' + - '.github/workflows/gradient.yml' + +defaults: + run: + working-directory: integrations/gradient + +concurrency: + group: gradient-${{ github.head_ref }} + cancel-in-progress: true + +env: + PYTHONUNBUFFERED: "1" + FORCE_COLOR: "1" + +jobs: + run: + name: Python ${{ matrix.python-version }} on ${{ startsWith(matrix.os, 'macos-') && 'macOS' || startsWith(matrix.os, 'windows-') && 'Windows' || 'Linux' }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ['3.9', '3.10'] + + steps: + - name: Support longpaths + if: matrix.os == 'windows-latest' + working-directory: . + run: git config --system core.longpaths true + + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Hatch + run: pip install --upgrade hatch + + - name: Lint + if: matrix.python-version == '3.9' && runner.os == 'Linux' + run: hatch run lint:all + + - name: Run tests + run: hatch run cov \ No newline at end of file diff --git a/integrations/gradient/LICENSE.txt b/integrations/gradient/LICENSE.txt new file mode 100644 index 000000000..cf4129e2b --- /dev/null +++ b/integrations/gradient/LICENSE.txt @@ -0,0 +1,9 @@ +MIT License + +Copyright (c) 2023-present Massimiliano Pippi + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
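For reviewers who want to run locally what the workflow above runs in CI, it reduces to a couple of Hatch commands. A minimal sketch, assuming a checkout of the repo with Python 3.9+ and `integrations/gradient` as the working directory (matching the `working-directory` default set in the workflow):

```console
pip install --upgrade hatch
hatch run lint:all   # the detached lint env: ruff, black --check, mypy (see pyproject.toml)
hatch run cov        # test-cov + cov-report: pytest under coverage, then the report
```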
diff --git a/integrations/gradient/README.md b/integrations/gradient/README.md new file mode 100644 index 000000000..853f5b4c3 --- /dev/null +++ b/integrations/gradient/README.md @@ -0,0 +1,21 @@ +# gradient-haystack + +[![PyPI - Version](https://img.shields.io/pypi/v/gradient-haystack.svg)](https://pypi.org/project/gradient-haystack) +[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/gradient-haystack.svg)](https://pypi.org/project/gradient-haystack) + +----- + +**Table of Contents** + +- [Installation](#installation) +- [License](#license) + +## Installation + +```console +pip install gradient-haystack +``` + +## License + +`gradient-haystack` is distributed under the terms of the [MIT](https://spdx.org/licenses/MIT.html) license. diff --git a/integrations/gradient/pyproject.toml b/integrations/gradient/pyproject.toml new file mode 100644 index 000000000..afdd8ecb8 --- /dev/null +++ b/integrations/gradient/pyproject.toml @@ -0,0 +1,169 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "gradient-haystack" +dynamic = ["version"] +description = '' +readme = "README.md" +requires-python = ">=3.7" +license = "MIT" +keywords = [] +authors = [ + { name = "Mateusz Haligowski", email = "mhaligowski@gmail.com" }, +] +classifiers = [ + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", +] +dependencies = [ + "haystack-ai", + "gradientai", +] + +[project.urls] +Documentation = "https://github.com/unknown/gradient-haystack#readme" +Issues = "https://github.com/unknown/gradient-haystack/issues" +Source = "https://github.com/unknown/gradient-haystack" + +[tool.hatch.version] +path = "src/gradient_haystack/__about__.py" + +[tool.hatch.envs.default] +dependencies = [ + "coverage[toml]>=6.5", + "pytest", +] +[tool.hatch.envs.default.scripts] +test = "pytest {args:tests}" +test-cov = "coverage run -m pytest {args:tests}" +cov-report = [ + "- coverage combine", + "coverage report", +] +cov = [ + "test-cov", + "cov-report", +] + +[[tool.hatch.envs.all.matrix]] +python = ["3.8", "3.9", "3.10", "3.11"] + +[tool.hatch.envs.lint] +detached = true +dependencies = [ + "black>=23.1.0", + "mypy>=1.0.0", + "ruff>=0.0.243", +] +[tool.hatch.envs.lint.scripts] +typing = "mypy --install-types --non-interactive {args:src/gradient_haystack tests}" +style = [ + "ruff {args:.}", + "black --check --diff {args:.}", +] +fmt = [ + "black {args:.}", + "ruff --fix {args:.}", + "style", +] +all = [ + "style", + "typing", +] + +[tool.black] +target-version = ["py38"] +line-length = 120 +skip-string-normalization = true + +[tool.ruff] +target-version = "py38" +line-length = 120 +select = [ + "A", + "ARG", + "B", + "C", + "DTZ", + "E", + "EM", + "F", + "FBT", + "I", + "ICN", + "ISC", + "N", + "PLC", + "PLE", + "PLR", + "PLW", + "Q", + "RUF", + "S", + "T", + "TID", + "UP", + "W", + "YTT", +] +ignore = [ + # Allow non-abstract empty methods in abstract base classes + "B027", + # Allow boolean positional values in function calls, like `dict.get(... 
True)` + "FBT003", + # Ignore checks for possible passwords + "S105", "S106", "S107", + # Ignore complexity + "C901", "PLR0911", "PLR0912", "PLR0913", "PLR0915", +] +unfixable = [ + # Don't touch unused imports + "F401", +] + +[tool.ruff.isort] +known-first-party = ["gradient_haystack"] + +[tool.ruff.flake8-tidy-imports] +ban-relative-imports = "all" + +[tool.ruff.per-file-ignores] +# Tests can use magic values, assertions, and relative imports +"tests/**/*" = ["PLR2004", "S101", "TID252"] + +[tool.coverage.run] +source_pkgs = ["gradient_haystack", "tests"] +branch = true +parallel = true +omit = [ + "src/gradient_haystack/__about__.py", +] + +[tool.coverage.paths] +gradient_haystack = ["src/gradient_haystack", "*/gradient-haystack/src/gradient_haystack"] +tests = ["tests", "*/gradient-haystack/tests"] + +[tool.coverage.report] +exclude_lines = [ + "no cov", + "if __name__ == .__main__.:", + "if TYPE_CHECKING:", +] + +[[tool.mypy.overrides]] +module = [ + "gradientai.*", + "haystack.*", + "pytest.*", + "numpy.*", +] +ignore_missing_imports = true \ No newline at end of file diff --git a/integrations/gradient/src/gradient_haystack/__about__.py b/integrations/gradient/src/gradient_haystack/__about__.py new file mode 100644 index 000000000..132530b41 --- /dev/null +++ b/integrations/gradient/src/gradient_haystack/__about__.py @@ -0,0 +1,4 @@ +# SPDX-FileCopyrightText: 2023-present Massimiliano Pippi +# +# SPDX-License-Identifier: MIT +__version__ = "0.0.1" diff --git a/integrations/gradient/src/gradient_haystack/__init__.py b/integrations/gradient/src/gradient_haystack/__init__.py new file mode 100644 index 000000000..bd78f6a28 --- /dev/null +++ b/integrations/gradient/src/gradient_haystack/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: 2023-present Massimiliano Pippi +# +# SPDX-License-Identifier: MIT diff --git a/integrations/gradient/src/gradient_haystack/embedders/__init__.py b/integrations/gradient/src/gradient_haystack/embedders/__init__.py new file mode 100644 index 000000000..bd78f6a28 --- /dev/null +++ b/integrations/gradient/src/gradient_haystack/embedders/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: 2023-present Massimiliano Pippi +# +# SPDX-License-Identifier: MIT diff --git a/integrations/gradient/src/gradient_haystack/embedders/gradient_document_embedder.py b/integrations/gradient/src/gradient_haystack/embedders/gradient_document_embedder.py new file mode 100644 index 000000000..81a93ad2b --- /dev/null +++ b/integrations/gradient/src/gradient_haystack/embedders/gradient_document_embedder.py @@ -0,0 +1,112 @@ +import logging +from typing import Any, Dict, List, Optional + +from haystack import Document, component, default_to_dict +from haystack.lazy_imports import LazyImport + +with LazyImport(message="Run 'pip install gradientai'") as gradientai_import: + from gradientai import Gradient + +logger = logging.getLogger(__name__) + + +@component +class GradientDocumentEmbedder: + """ + A component for computing Document embeddings using the Gradient AI API. + The embedding of each Document is stored in the `embedding` field of the Document.
+ + ```python + embedder = GradientDocumentEmbedder( + access_token=gradient_access_token, + workspace_id=gradient_workspace_id, + model_name="bge-large") + p = Pipeline() + p.add_component(instance=embedder, name="document_embedder") + p.add_component(instance=DocumentWriter(document_store=InMemoryDocumentStore()), name="document_writer") + p.connect("document_embedder", "document_writer") + p.run({"document_embedder": {"documents": documents}}) + ``` + """ + + def __init__( + self, + *, + model_name: str = "bge-large", + batch_size: int = 100, + access_token: Optional[str] = None, + workspace_id: Optional[str] = None, + host: Optional[str] = None, + ) -> None: + """ + Create a GradientDocumentEmbedder component. + + :param model_name: The name of the model to use. + :param batch_size: The number of Documents to send to the API in a single request. + :param access_token: The Gradient access token. If not provided it's read from the environment + variable GRADIENT_ACCESS_TOKEN. + :param workspace_id: The Gradient workspace ID. If not provided it's read from the environment + variable GRADIENT_WORKSPACE_ID. + :param host: The Gradient host. By default it uses https://api.gradient.ai/. + """ + gradientai_import.check() + self._batch_size = batch_size + self._host = host + self._model_name = model_name + + self._gradient = Gradient(access_token=access_token, host=host, workspace_id=workspace_id) + + def _get_telemetry_data(self) -> Dict[str, Any]: + """ + Data that is sent to Posthog for usage analytics. + """ + return {"model": self._model_name} + + def to_dict(self) -> dict: + """ + Serialize the component to a Python dictionary. + """ + return default_to_dict(self, workspace_id=self._gradient.workspace_id, model_name=self._model_name) + + def warm_up(self) -> None: + """ + Load the embedding model. + """ + if not hasattr(self, "_embedding_model"): + self._embedding_model = self._gradient.get_embeddings_model(slug=self._model_name) + + def _generate_embeddings(self, documents: List[Document], batch_size: int) -> List[List[float]]: + """ + Batches the documents and generates the embeddings. + """ + batches = [documents[i : i + batch_size] for i in range(0, len(documents), batch_size)] + + embeddings = [] + for batch in batches: + response = self._embedding_model.generate_embeddings(inputs=[{"input": doc.content} for doc in batch]) + embeddings.extend([e.embedding for e in response.embeddings]) + + return embeddings + + @component.output_types(documents=List[Document]) + def run(self, documents: List[Document]): + """ + Embed a list of Documents. + The embedding of each Document is stored in the `embedding` field of the Document. + + :param documents: A list of Documents to embed. + """ + if not isinstance(documents, list) or (documents and any(not isinstance(doc, Document) for doc in documents)): + msg = "GradientDocumentEmbedder expects a list of Documents as input.\ + In case you want to embed a list of strings, please use the GradientTextEmbedder." + raise TypeError(msg) + + if not hasattr(self, "_embedding_model"): + msg = "The embedding model has not been loaded. Please call warm_up() before running."
+ raise RuntimeError(msg) + + embeddings = self._generate_embeddings(documents=documents, batch_size=self._batch_size) + for doc, embedding in zip(documents, embeddings): + doc.embedding = embedding + + return {"documents": documents} diff --git a/integrations/gradient/src/gradient_haystack/embedders/gradient_text_embedder.py b/integrations/gradient/src/gradient_haystack/embedders/gradient_text_embedder.py new file mode 100644 index 000000000..53996b785 --- /dev/null +++ b/integrations/gradient/src/gradient_haystack/embedders/gradient_text_embedder.py @@ -0,0 +1,89 @@ +from typing import Any, Dict, List, Optional + +from haystack import component, default_to_dict +from haystack.lazy_imports import LazyImport + +with LazyImport(message="Run 'pip install gradientai'") as gradientai_import: + from gradientai import Gradient + + +@component +class GradientTextEmbedder: + """ + A component for embedding strings using models hosted on Gradient AI (https://gradient.ai). + + ```python + embedder = GradientTextEmbedder( + access_token=gradient_access_token, + workspace_id=gradient_workspace_id, + model_name="bge-large") + p = Pipeline() + p.add_component(instance=embedder, name="text_embedder") + p.add_component(instance=InMemoryEmbeddingRetriever(document_store=InMemoryDocumentStore()), name="retriever") + p.connect("text_embedder", "retriever") + p.run({"text_embedder": {"text": "embed me!!!"}}) + ``` + """ + + def __init__( + self, + *, + model_name: str = "bge-large", + access_token: Optional[str] = None, + workspace_id: Optional[str] = None, + host: Optional[str] = None, + ) -> None: + """ + Create a GradientTextEmbedder component. + + :param model_name: The name of the model to use. + :param access_token: The Gradient access token. If not provided it's read from the environment + variable GRADIENT_ACCESS_TOKEN. + :param workspace_id: The Gradient workspace ID. If not provided it's read from the environment + variable GRADIENT_WORKSPACE_ID. + :param host: The Gradient host. By default it uses https://api.gradient.ai/. + """ + gradientai_import.check() + self._host = host + self._model_name = model_name + + self._gradient = Gradient(access_token=access_token, host=host, workspace_id=workspace_id) + + def _get_telemetry_data(self) -> Dict[str, Any]: + """ + Data that is sent to Posthog for usage analytics. + """ + return {"model": self._model_name} + + def to_dict(self) -> dict: + """ + Serialize the component to a Python dictionary. + """ + return default_to_dict(self, workspace_id=self._gradient.workspace_id, model_name=self._model_name) + + def warm_up(self) -> None: + """ + Load the embedding model. + """ + if not hasattr(self, "_embedding_model"): + self._embedding_model = self._gradient.get_embeddings_model(slug=self._model_name) + + @component.output_types(embedding=List[float]) + def run(self, text: str): + """Generates an embedding for a single text.""" + if not isinstance(text, str): + msg = "GradientTextEmbedder expects a string as an input.\ + In case you want to embed a list of Documents, please use the GradientDocumentEmbedder." + raise TypeError(msg) + + if not hasattr(self, "_embedding_model"): + msg = "The embedding model has not been loaded. Please call warm_up() before running." + raise RuntimeError(msg) + + result = self._embedding_model.generate_embeddings(inputs=[{"input": text}]) + + if (not result) or (result.embeddings is None) or (len(result.embeddings) == 0): + msg = "The embedding model did not return any embeddings."
+ raise RuntimeError(msg) + + return {"embedding": result.embeddings[0].embedding} diff --git a/integrations/gradient/src/gradient_haystack/generator/__init__.py b/integrations/gradient/src/gradient_haystack/generator/__init__.py new file mode 100644 index 000000000..bd78f6a28 --- /dev/null +++ b/integrations/gradient/src/gradient_haystack/generator/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: 2023-present Massimiliano Pippi +# +# SPDX-License-Identifier: MIT diff --git a/integrations/gradient/src/gradient_haystack/generator/base.py b/integrations/gradient/src/gradient_haystack/generator/base.py new file mode 100644 index 000000000..536525377 --- /dev/null +++ b/integrations/gradient/src/gradient_haystack/generator/base.py @@ -0,0 +1,129 @@ +import logging +from typing import Any, Dict, List, Optional + +from haystack import component, default_to_dict +from haystack.lazy_imports import LazyImport + +with LazyImport(message="Run 'pip install gradientai'") as gradientai_import: + from gradientai import Gradient + +logger = logging.getLogger(__name__) + + +@component +class GradientGenerator: + """ + LLM Generator interfacing [Gradient AI](https://gradient.ai/). + + Queries the LLM using Gradient AI's SDK ('gradientai' package). + See [Gradient AI API](https://docs.gradient.ai/docs/sdk-quickstart) for more details. + + ```python + llm = GradientGenerator( + access_token=gradient_access_token, + workspace_id=gradient_workspace_id, + base_model_slug="llama2-7b-chat") + llm.warm_up() + print(llm.run(prompt="What is the meaning of life?")) + # Output: {'replies': ['42']} + ``` + """ + + def __init__( + self, + *, + access_token: Optional[str] = None, + base_model_slug: Optional[str] = None, + host: Optional[str] = None, + max_generated_token_count: Optional[int] = None, + model_adapter_id: Optional[str] = None, + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_p: Optional[float] = None, + workspace_id: Optional[str] = None, + ) -> None: + """ + Create a GradientGenerator component. + + :param access_token: The Gradient access token. If not provided it's read from the environment + variable GRADIENT_ACCESS_TOKEN. + :param base_model_slug: The base model slug to use. + :param host: The Gradient host. By default it uses https://api.gradient.ai/. + :param max_generated_token_count: The maximum number of tokens to generate. + :param model_adapter_id: The model adapter ID to use. + :param temperature: The temperature to use. + :param top_k: The top k to use. + :param top_p: The top p to use. + :param workspace_id: The Gradient workspace ID. If not provided it's read from the environment + variable GRADIENT_WORKSPACE_ID. + """ + gradientai_import.check() + + self._access_token = access_token + self._base_model_slug = base_model_slug + self._host = host + self._max_generated_token_count = max_generated_token_count + self._model_adapter_id = model_adapter_id + self._temperature = temperature + self._top_k = top_k + self._top_p = top_p + self._workspace_id = workspace_id + + has_base_model_slug = base_model_slug is not None and base_model_slug != "" + has_model_adapter_id = model_adapter_id is not None and model_adapter_id != "" + + if not has_base_model_slug and not has_model_adapter_id: + msg = "Either base_model_slug or model_adapter_id must be provided." + raise ValueError(msg) + if has_base_model_slug and has_model_adapter_id: + msg = "Only one of base_model_slug or model_adapter_id must be provided." 
+ raise ValueError(msg) + + if has_base_model_slug: + self._base_model_slug = base_model_slug + if has_model_adapter_id: + self._model_adapter_id = model_adapter_id + + self._gradient = Gradient(access_token=access_token, host=host, workspace_id=workspace_id) + + def to_dict(self) -> Dict[str, Any]: + """ + Serialize this component to a dictionary. + """ + return default_to_dict( + self, + base_model_slug=self._base_model_slug, + host=self._host, + max_generated_token_count=self._max_generated_token_count, + model_adapter_id=self._model_adapter_id, + temperature=self._temperature, + top_k=self._top_k, + top_p=self._top_p, + workspace_id=self._workspace_id, + ) + + def warm_up(self): + """ + Initializes the LLM model instance if it doesn't exist. + """ + if not hasattr(self, "_model"): + if isinstance(self._base_model_slug, str): + self._model = self._gradient.get_base_model(base_model_slug=self._base_model_slug) + if isinstance(self._model_adapter_id, str): + self._model = self._gradient.get_model_adapter(model_adapter_id=self._model_adapter_id) + + @component.output_types(replies=List[str]) + def run(self, prompt: str): + """ + Queries the LLM with the prompt to produce replies. + + :param prompt: The prompt to be sent to the generative model. + """ + resp = self._model.complete( + query=prompt, + max_generated_token_count=self._max_generated_token_count, + temperature=self._temperature, + top_k=self._top_k, + top_p=self._top_p, + ) + return {"replies": [resp.generated_output]} diff --git a/integrations/gradient/tests/__init__.py b/integrations/gradient/tests/__init__.py new file mode 100644 index 000000000..bd78f6a28 --- /dev/null +++ b/integrations/gradient/tests/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: 2023-present Massimiliano Pippi +# +# SPDX-License-Identifier: MIT diff --git a/integrations/gradient/tests/test_gradient_document_embedder.py b/integrations/gradient/tests/test_gradient_document_embedder.py new file mode 100644 index 000000000..bac02df5e --- /dev/null +++ b/integrations/gradient/tests/test_gradient_document_embedder.py @@ -0,0 +1,158 @@ +from unittest.mock import MagicMock, NonCallableMagicMock + +import numpy as np +import pytest +from gradientai.openapi.client.models.generate_embedding_success import GenerateEmbeddingSuccess +from haystack import Document + +from gradient_haystack.embedders.gradient_document_embedder import GradientDocumentEmbedder + +access_token = "access_token" +workspace_id = "workspace_id" +model = "bge-large" + + +class TestGradientDocumentEmbedder: + @pytest.mark.unit + def test_init_from_env(self, monkeypatch): + monkeypatch.setenv("GRADIENT_ACCESS_TOKEN", access_token) + monkeypatch.setenv("GRADIENT_WORKSPACE_ID", workspace_id) + + embedder = GradientDocumentEmbedder() + assert embedder is not None + assert embedder._gradient.workspace_id == workspace_id + assert embedder._gradient._api_client.configuration.access_token == access_token + + @pytest.mark.unit + def test_init_without_access_token(self, monkeypatch): + monkeypatch.delenv("GRADIENT_ACCESS_TOKEN", raising=False) + + with pytest.raises(ValueError): + GradientDocumentEmbedder(workspace_id=workspace_id) + + @pytest.mark.unit + def test_init_without_workspace(self, monkeypatch): + monkeypatch.delenv("GRADIENT_WORKSPACE_ID", raising=False) + + with pytest.raises(ValueError): + GradientDocumentEmbedder(access_token=access_token) + + @pytest.mark.unit + def test_init_from_params(self): + embedder = GradientDocumentEmbedder(access_token=access_token, 
workspace_id=workspace_id) + assert embedder is not None + assert embedder._gradient.workspace_id == workspace_id + assert embedder._gradient._api_client.configuration.access_token == access_token + + @pytest.mark.unit + def test_init_from_params_precedence(self, monkeypatch): + monkeypatch.setenv("GRADIENT_ACCESS_TOKEN", "env_access_token") + monkeypatch.setenv("GRADIENT_WORKSPACE_ID", "env_workspace_id") + + embedder = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id) + assert embedder is not None + assert embedder._gradient.workspace_id == workspace_id + assert embedder._gradient._api_client.configuration.access_token == access_token + + @pytest.mark.unit + def test_to_dict(self): + component = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id) + data = component.to_dict() + assert data == { + "type": "gradient_haystack.embedders.gradient_document_embedder.GradientDocumentEmbedder", + "init_parameters": {"workspace_id": workspace_id, "model_name": "bge-large"}, + } + + @pytest.mark.unit + def test_warmup(self): + embedder = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id) + embedder._gradient.get_embeddings_model = MagicMock() + embedder.warm_up() + embedder._gradient.get_embeddings_model.assert_called_once_with(slug="bge-large") + + @pytest.mark.unit + def test_warmup_doesnt_reload(self): + embedder = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id) + embedder._gradient.get_embeddings_model = MagicMock(return_value="fake model") + embedder.warm_up() + embedder.warm_up() + embedder._gradient.get_embeddings_model.assert_called_once_with(slug="bge-large") + + @pytest.mark.unit + def test_run_fail_if_not_warmed_up(self): + embedder = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id) + + with pytest.raises(RuntimeError, match=r"warm_up\(\)"): + embedder.run(documents=[Document(content=f"document number {i}") for i in range(5)]) + + @pytest.mark.unit + def test_run(self): + embedder = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id) + embedder._embedding_model = NonCallableMagicMock() + embedder._embedding_model.generate_embeddings.return_value = GenerateEmbeddingSuccess( + embeddings=[{"embedding": np.random.rand(1024).tolist(), "index": i} for i in range(5)] + ) + + documents = [Document(content=f"document number {i}") for i in range(5)] + + result = embedder.run(documents=documents) + + assert embedder._embedding_model.generate_embeddings.call_count == 1 + assert isinstance(result["documents"], list) + assert len(result["documents"]) == len(documents) + for doc in result["documents"]: + assert isinstance(doc, Document) + assert isinstance(doc.embedding, list) + assert isinstance(doc.embedding[0], float) + + @pytest.mark.unit + def test_run_batch(self): + embedder = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id) + embedder._embedding_model = NonCallableMagicMock() + + embedder._embedding_model.generate_embeddings.return_value = GenerateEmbeddingSuccess( + embeddings=[{"embedding": np.random.rand(1024).tolist(), "index": i} for i in range(110)] + ) + + documents = [Document(content=f"document number {i}") for i in range(110)] + + result = embedder.run(documents=documents) + + assert embedder._embedding_model.generate_embeddings.call_count == 2 + assert isinstance(result["documents"], list) + assert len(result["documents"]) == len(documents) + for doc in result["documents"]: +
assert isinstance(doc, Document) + assert isinstance(doc.embedding, list) + assert isinstance(doc.embedding[0], float) + + @pytest.mark.unit + def test_run_custom_batch(self): + embedder = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id, batch_size=20) + embedder._embedding_model = NonCallableMagicMock() + + document_count = 101 + embedder._embedding_model.generate_embeddings.return_value = GenerateEmbeddingSuccess( + embeddings=[{"embedding": np.random.rand(1024).tolist(), "index": i} for i in range(document_count)] + ) + + documents = [Document(content=f"document number {i}") for i in range(document_count)] + + result = embedder.run(documents=documents) + + assert embedder._embedding_model.generate_embeddings.call_count == 6 + assert isinstance(result["documents"], list) + assert len(result["documents"]) == len(documents) + for doc in result["documents"]: + assert isinstance(doc, Document) + assert isinstance(doc.embedding, list) + assert isinstance(doc.embedding[0], float) + + @pytest.mark.unit + def test_run_empty(self): + embedder = GradientDocumentEmbedder(access_token=access_token, workspace_id=workspace_id) + embedder._embedding_model = NonCallableMagicMock() + + result = embedder.run(documents=[]) + + assert result["documents"] == [] diff --git a/integrations/gradient/tests/test_gradient_rag_pipelines.py b/integrations/gradient/tests/test_gradient_rag_pipelines.py new file mode 100644 index 000000000..5835944a8 --- /dev/null +++ b/integrations/gradient/tests/test_gradient_rag_pipelines.py @@ -0,0 +1,93 @@ +import json +import os + +import pytest +from haystack import Document, Pipeline +from haystack.components.builders.answer_builder import AnswerBuilder +from haystack.components.builders.prompt_builder import PromptBuilder +from haystack.components.retrievers import InMemoryEmbeddingRetriever +from haystack.components.writers import DocumentWriter +from haystack.document_stores import InMemoryDocumentStore + +from gradient_haystack.embedders.gradient_document_embedder import GradientDocumentEmbedder +from gradient_haystack.embedders.gradient_text_embedder import GradientTextEmbedder +from gradient_haystack.generator.base import GradientGenerator + + +@pytest.mark.skipif( + not os.environ.get("GRADIENT_ACCESS_TOKEN", None) or not os.environ.get("GRADIENT_WORKSPACE_ID", None), + reason="Export env variables called GRADIENT_ACCESS_TOKEN and GRADIENT_WORKSPACE_ID \ + containing the Gradient configuration settings to run this test.", +) +def test_gradient_embedding_retrieval_rag_pipeline(tmp_path): + # Create the RAG pipeline + prompt_template = """ + Given these documents, answer the question.\nDocuments: + {% for doc in documents %} + {{ doc.content }} + {% endfor %} + \nQuestion: {{question}} + \nAnswer: + """ + + gradient_access_token = os.environ.get("GRADIENT_ACCESS_TOKEN") + rag_pipeline = Pipeline() + embedder = GradientTextEmbedder(access_token=gradient_access_token) + rag_pipeline.add_component(instance=embedder, name="text_embedder") + rag_pipeline.add_component( + instance=InMemoryEmbeddingRetriever(document_store=InMemoryDocumentStore()), name="retriever" + ) + rag_pipeline.add_component(instance=PromptBuilder(template=prompt_template), name="prompt_builder") + rag_pipeline.add_component( + instance=GradientGenerator(access_token=gradient_access_token, base_model_slug="llama2-7b-chat"), name="llm" + ) + rag_pipeline.add_component(instance=AnswerBuilder(), name="answer_builder") + rag_pipeline.connect("text_embedder", "retriever") + 
rag_pipeline.connect("retriever", "prompt_builder.documents") + rag_pipeline.connect("prompt_builder", "llm") + rag_pipeline.connect("llm.replies", "answer_builder.replies") + rag_pipeline.connect("retriever", "answer_builder.documents") + + # Draw the pipeline + rag_pipeline.draw(tmp_path / "test_gradient_embedding_rag_pipeline.png") + + # Serialize the pipeline to JSON + with open(tmp_path / "test_bm25_rag_pipeline.json", "w") as f: + json.dump(rag_pipeline.to_dict(), f) + + # Load the pipeline back + with open(tmp_path / "test_bm25_rag_pipeline.json") as f: + rag_pipeline = Pipeline.from_dict(json.load(f)) + + # Populate the document store + documents = [ + Document(content="My name is Jean and I live in Paris."), + Document(content="My name is Mark and I live in Berlin."), + Document(content="My name is Giorgio and I live in Rome."), + ] + document_store = rag_pipeline.get_component("retriever").document_store + indexing_pipeline = Pipeline() + indexing_pipeline.add_component(instance=GradientDocumentEmbedder(), name="document_embedder") + indexing_pipeline.add_component(instance=DocumentWriter(document_store=document_store), name="document_writer") + indexing_pipeline.connect("document_embedder", "document_writer") + indexing_pipeline.run({"document_embedder": {"documents": documents}}) + + # Query and assert + questions = ["Who lives in Paris?", "Who lives in Berlin?", "Who lives in Rome?"] + answers_spywords = ["Jean", "Mark", "Giorgio"] + + for question, spyword in zip(questions, answers_spywords): + result = rag_pipeline.run( + { + "text_embedder": {"text": question}, + "prompt_builder": {"question": question}, + "answer_builder": {"query": question}, + } + ) + + assert len(result["answer_builder"]["answers"]) == 1 + generated_answer = result["answer_builder"]["answers"][0] + assert spyword in generated_answer.data + assert generated_answer.query == question + assert hasattr(generated_answer, "documents") + assert hasattr(generated_answer, "metadata") diff --git a/integrations/gradient/tests/test_gradient_text_embedder.py b/integrations/gradient/tests/test_gradient_text_embedder.py new file mode 100644 index 000000000..9623db5d4 --- /dev/null +++ b/integrations/gradient/tests/test_gradient_text_embedder.py @@ -0,0 +1,127 @@ +from unittest.mock import MagicMock, NonCallableMagicMock + +import numpy as np +import pytest +from gradientai.openapi.client.models.generate_embedding_success import GenerateEmbeddingSuccess + +from gradient_haystack.embedders.gradient_text_embedder import GradientTextEmbedder + +access_token = "access_token" +workspace_id = "workspace_id" +model = "bge-large" + + +class TestGradientTextEmbedder: + @pytest.mark.unit + def test_init_from_env(self, monkeypatch): + monkeypatch.setenv("GRADIENT_ACCESS_TOKEN", access_token) + monkeypatch.setenv("GRADIENT_WORKSPACE_ID", workspace_id) + + embedder = GradientTextEmbedder() + assert embedder is not None + assert embedder._gradient.workspace_id == workspace_id + assert embedder._gradient._api_client.configuration.access_token == access_token + + @pytest.mark.unit + def test_init_without_access_token(self, monkeypatch): + monkeypatch.delenv("GRADIENT_ACCESS_TOKEN", raising=False) + + with pytest.raises(ValueError): + GradientTextEmbedder(workspace_id=workspace_id) + + @pytest.mark.unit + def test_init_without_workspace(self, monkeypatch): + monkeypatch.delenv("GRADIENT_WORKSPACE_ID", raising=False) + + with pytest.raises(ValueError): + GradientTextEmbedder(access_token=access_token) + + @pytest.mark.unit + def 
test_init_from_params(self): + embedder = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id) + assert embedder is not None + assert embedder._gradient.workspace_id == workspace_id + assert embedder._gradient._api_client.configuration.access_token == access_token + + @pytest.mark.unit + def test_init_from_params_precedence(self, monkeypatch): + monkeypatch.setenv("GRADIENT_ACCESS_TOKEN", "env_access_token") + monkeypatch.setenv("GRADIENT_WORKSPACE_ID", "env_workspace_id") + + embedder = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id) + assert embedder is not None + assert embedder._gradient.workspace_id == workspace_id + assert embedder._gradient._api_client.configuration.access_token == access_token + + @pytest.mark.unit + def test_to_dict(self): + component = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id) + data = component.to_dict() + assert data == { + "type": "gradient_haystack.embedders.gradient_text_embedder.GradientTextEmbedder", + "init_parameters": {"workspace_id": workspace_id, "model_name": "bge-large"}, + } + + @pytest.mark.unit + def test_warmup(self): + embedder = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id) + embedder._gradient.get_embeddings_model = MagicMock() + embedder.warm_up() + embedder._gradient.get_embeddings_model.assert_called_once_with(slug="bge-large") + + @pytest.mark.unit + def test_warmup_doesnt_reload(self): + embedder = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id) + embedder._gradient.get_embeddings_model = MagicMock(return_value="fake model") + embedder.warm_up() + embedder.warm_up() + embedder._gradient.get_embeddings_model.assert_called_once_with(slug="bge-large") + + @pytest.mark.unit + def test_run_fail_if_not_warmed_up(self): + embedder = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id) + + with pytest.raises(RuntimeError, match=r"warm_up\(\)"): + embedder.run(text="The food was delicious") + + @pytest.mark.unit + def test_run_fail_when_no_embeddings_returned(self): + embedder = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id) + embedder._embedding_model = NonCallableMagicMock() + embedder._embedding_model.generate_embeddings.return_value = GenerateEmbeddingSuccess(embeddings=[]) + + with pytest.raises(RuntimeError): + _result = embedder.run(text="The food was delicious") + embedder._embedding_model.generate_embeddings.assert_called_once_with( + inputs=[{"input": "The food was delicious"}] + ) + + @pytest.mark.unit + def test_run_empty_string(self): + embedder = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id) + embedder._embedding_model = NonCallableMagicMock() + embedder._embedding_model.generate_embeddings.return_value = GenerateEmbeddingSuccess( + embeddings=[{"embedding": np.random.rand(1024).tolist(), "index": 0}] + ) + + result = embedder.run(text="") + embedder._embedding_model.generate_embeddings.assert_called_once_with(inputs=[{"input": ""}]) + + assert len(result["embedding"]) == 1024 # 1024 is the bge-large embedding size + assert all(isinstance(x, float) for x in result["embedding"]) + + @pytest.mark.unit + def test_run(self): + embedder = GradientTextEmbedder(access_token=access_token, workspace_id=workspace_id) + embedder._embedding_model = NonCallableMagicMock() + embedder._embedding_model.generate_embeddings.return_value = GenerateEmbeddingSuccess( + embeddings=[{"embedding": np.random.rand(1024).tolist(), "index":
0}] + ) + + result = embedder.run(text="The food was delicious") + embedder._embedding_model.generate_embeddings.assert_called_once_with( + inputs=[{"input": "The food was delicious"}] + ) + + assert len(result["embedding"]) == 1024 # 1024 is the bge-large embedding size + assert all(isinstance(x, float) for x in result["embedding"])
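Taken together, the pieces above compose the way `test_gradient_rag_pipelines.py` wires them. A minimal end-to-end sketch, for illustration only and not part of the patch: it assumes `GRADIENT_ACCESS_TOKEN` and `GRADIENT_WORKSPACE_ID` are exported, reuses the in-memory store and retriever imports from that test, and relies on `Pipeline.run()` warming components up, as the integration test does:

```python
from haystack import Document, Pipeline
from haystack.components.retrievers import InMemoryEmbeddingRetriever
from haystack.components.writers import DocumentWriter
from haystack.document_stores import InMemoryDocumentStore

from gradient_haystack.embedders.gradient_document_embedder import GradientDocumentEmbedder
from gradient_haystack.embedders.gradient_text_embedder import GradientTextEmbedder

document_store = InMemoryDocumentStore()

# Indexing: embed Documents with Gradient and write them to the store.
indexing = Pipeline()
indexing.add_component(instance=GradientDocumentEmbedder(), name="document_embedder")
indexing.add_component(instance=DocumentWriter(document_store=document_store), name="document_writer")
indexing.connect("document_embedder", "document_writer")
indexing.run({"document_embedder": {"documents": [Document(content="My name is Jean and I live in Paris.")]}})

# Querying: embed the question and retrieve the closest Documents.
querying = Pipeline()
querying.add_component(instance=GradientTextEmbedder(), name="text_embedder")
querying.add_component(instance=InMemoryEmbeddingRetriever(document_store=document_store), name="retriever")
querying.connect("text_embedder", "retriever")

result = querying.run({"text_embedder": {"text": "Who lives in Paris?"}})
print(result["retriever"]["documents"])
```

The generator follows the same pattern; its single-component usage is shown in the `GradientGenerator` docstring above.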