Skip to content

Commit

Permalink
more ignore deps
Browse files Browse the repository at this point in the history
  • Loading branch information
logan-markewich committed Feb 15, 2024
1 parent 4c75a6b commit d9e673d
Show file tree
Hide file tree
Showing 7 changed files with 73 additions and 11 deletions.
8 changes: 5 additions & 3 deletions llama-index-core/llama_index/core/agent/runner/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -253,16 +253,18 @@ def from_llm(
)

try:
from llama_index.llms.openai import OpenAI
from llama_index.llms.openai.utils import is_function_calling_model
from llama_index.llms.openai import OpenAI # pants: no-infer-dep
from llama_index.llms.openai.utils import (
is_function_calling_model,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"`llama-index-llms-openai` package not found. Please "
"install by running `pip install llama-index-llms-openai`."
)

if isinstance(llm, OpenAI) and is_function_calling_model(llm.model):
from llama_index.agent.openai import OpenAIAgent
from llama_index.agent.openai import OpenAIAgent # pants: no-infer-dep

return OpenAIAgent.from_tools(
tools=tools,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ def default_rag_cli() -> RagCLI:

from llama_index.vector_stores.chroma import (
ChromaVectorStore,
)
) # pants: no-infer-dep
except ImportError:
ChromaVectorStore = None

Expand Down
2 changes: 1 addition & 1 deletion llama-index-core/llama_index/core/ingestion/api_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@

def default_transformations() -> List[TransformComponent]:
"""Default transformations."""
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding # pants: no-infer-dep

return [
SentenceSplitter(),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -255,7 +255,7 @@ def build_configured_transformation(

# Embeddings
try:
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding # pants: no-infer-dep

enum_members.append(
(
Expand Down
4 changes: 3 additions & 1 deletion llama-index-core/llama_index/core/program/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,9 @@ def get_program_for_llm(
**kwargs,
)
elif pydantic_program_mode == PydanticProgramMode.OPENAI:
from llama_index.core.program.openai_program import OpenAIPydanticProgram
from llama_index.program.openai import (
OpenAIPydanticProgram,
) # pants: no-infer-dep

return OpenAIPydanticProgram.from_defaults(
output_cls=output_cls,
Expand Down
10 changes: 6 additions & 4 deletions llama-index-core/llama_index/core/prompts/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -451,7 +451,7 @@ def partial_format(self, **kwargs: Any) -> "BasePromptTemplate":

def format(self, llm: Optional[BaseLLM] = None, **kwargs: Any) -> str:
"""Format the prompt into a string."""
from llama_index.llms.langchain import LangChainLLM
from llama_index.llms.langchain import LangChainLLM # pants: no-infer-dep

if llm is not None:
# if llamaindex LLM is provided, and we require a langchain LLM,
Expand All @@ -474,8 +474,10 @@ def format_messages(
self, llm: Optional[BaseLLM] = None, **kwargs: Any
) -> List[ChatMessage]:
"""Format the prompt into a list of chat messages."""
from llama_index.llms.langchain import LangChainLLM
from llama_index.llms.langchain.utils import from_lc_messages
from llama_index.llms.langchain import LangChainLLM # pants: no-infer-dep
from llama_index.llms.langchain.utils import (
from_lc_messages,
) # pants: no-infer-dep

if llm is not None:
# if llamaindex LLM is provided, and we require a langchain LLM,
Expand All @@ -497,7 +499,7 @@ def format_messages(
return from_lc_messages(lc_messages)

def get_template(self, llm: Optional[BaseLLM] = None) -> str:
from llama_index.llms.langchain import LangChainLLM
from llama_index.llms.langchain import LangChainLLM # pants: no-infer-dep

if llm is not None:
# if llamaindex LLM is provided, and we require a langchain LLM,
Expand Down
56 changes: 56 additions & 0 deletions llama-index-core/tests/embeddings/todo_hf_test_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
from typing import Any, Dict

# pants: no-infer-dep
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
from llama_index.core.embeddings.utils import resolve_embed_model
from llama_index.embeddings.huggingface import (
HuggingFaceEmbedding,
) # pants: no-infer-dep
from llama_index.embeddings.openai import OpenAIEmbedding # pants: no-infer-dep
from pytest import MonkeyPatch


def mock_hf_embeddings(self: Any, *args: Any, **kwargs: Any) -> None:
    """Replacement for ``HuggingFaceEmbedding.__init__`` used via monkeypatch.

    Ignores all positional/keyword arguments and initializes the parent
    class directly with placeholder values — presumably so the test never
    loads a real model or tokenizer (TODO confirm against the real
    ``HuggingFaceEmbedding.__init__``).

    Note: ``**kwargs`` is annotated ``Any`` (each keyword value), not
    ``Dict[str, Any]`` — the original annotation described the whole
    mapping, which is not what PEP 484 specifies for ``**kwargs``.
    """
    # ``self`` is a HuggingFaceEmbedding instance at call time, so this
    # skips HuggingFaceEmbedding.__init__ and runs the base-class init.
    super(HuggingFaceEmbedding, self).__init__(
        model_name="fake",
        tokenizer_name="fake",
        model="fake",
        tokenizer="fake",
    )


def mock_openai_embeddings(self: Any, *args: Any, **kwargs: Any) -> None:
    """Replacement for ``OpenAIEmbedding.__init__`` used via monkeypatch.

    Ignores all positional/keyword arguments and initializes the parent
    class directly with placeholder credentials — presumably so the test
    never needs a real OpenAI API key (TODO confirm against the real
    ``OpenAIEmbedding.__init__``).
    """
    # ``self`` is an OpenAIEmbedding instance at call time, so this skips
    # OpenAIEmbedding.__init__ and runs the base-class init.
    super(OpenAIEmbedding, self).__init__(
        api_key="fake", api_base="fake", api_version="fake"
    )


def test_resolve_embed_model(monkeypatch: MonkeyPatch) -> None:
    """resolve_embed_model maps each supported input to the expected type."""
    # Patch both embedding constructors so no real model or API key is needed.
    patches = (
        (
            "llama_index.embeddings.huggingface.HuggingFaceEmbedding.__init__",
            mock_hf_embeddings,
        ),
        (
            "llama_index.embeddings.openai.OpenAIEmbedding.__init__",
            mock_openai_embeddings,
        ),
    )
    for target, replacement in patches:
        monkeypatch.setattr(target, replacement)

    # None resolves to the mock embedding.
    assert isinstance(resolve_embed_model(None), MockEmbedding)

    # The "local" shorthand resolves to a HuggingFace embedding.
    assert isinstance(resolve_embed_model("local"), HuggingFaceEmbedding)

    # Embedding instances are passed through with their type preserved.
    hf_instance = HuggingFaceEmbedding()
    assert isinstance(resolve_embed_model(hf_instance), HuggingFaceEmbedding)

    openai_instance = OpenAIEmbedding()
    assert isinstance(resolve_embed_model(openai_instance), OpenAIEmbedding)

0 comments on commit d9e673d

Please sign in to comment.