feat: dedicated llm type (#3)

samuelint authored Jul 15, 2024
1 parent 42c953b commit b2d5112
Showing 7 changed files with 33 additions and 6 deletions.
7 changes: 7 additions & 0 deletions langchain_llamacpp_chat_model/llama_chat_model.py
@@ -8,6 +8,7 @@

 class LlamaChatModel(BaseChatOpenAI):
     model_name: str = "unknown"
+    llama: Llama = None

     def __init__(
         self,
@@ -19,3 +20,9 @@ def __init__(
             client=LLamaOpenAIClientProxy(llama=llama),
             async_client=LLamaOpenAIClientAsyncProxy(llama=llama),
         )
+        self.llama = llama
+
+    @property
+    def _llm_type(self) -> str:
+        """Return type of chat model."""
+        return self.llama.model_path
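
With this commit, _llm_type — the string LangChain uses to identify a chat model's type in its serialized form — reports the underlying model file instead of the value inherited from BaseChatOpenAI. A minimal sketch of the new behavior, assuming a hypothetical local GGUF path:

from llama_cpp import Llama
from langchain_llamacpp_chat_model import LlamaChatModel

# Hypothetical local GGUF file; any llama.cpp-compatible model behaves the same.
llama = Llama(model_path="/models/phi-2.Q4_K_M.gguf")
chat_model = LlamaChatModel(llama=llama)

# _llm_type now resolves to the model file path rather than a generic type.
print(chat_model._llm_type)  # /models/phi-2.Q4_K_M.gguf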
4 changes: 2 additions & 2 deletions tests/test_functional/models_configuration.py
@@ -45,8 +45,8 @@ def _create_models_settings():
     return models


-def create_llama(request) -> Llama:
-    local_path = _model_local_path(request.param)
+def create_llama(params) -> Llama:
+    local_path = _model_local_path(params)

     return Llama(
         model_path=local_path,
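
The new signature decouples create_llama from pytest's request object: callers now pass a model configuration directly. A sketch of the two call patterns this enables, mirroring the fixtures in the test files below:

import pytest
from llama_cpp import Llama
from tests.test_functional.models_configuration import create_llama, models_to_test

# Parametrized fixture: pytest supplies each model config via request.param.
@pytest.fixture(params=models_to_test, ids=[c["repo_id"] for c in models_to_test])
def llama(request) -> Llama:
    return create_llama(request.param)

# Direct call: pass a single config dict, no request object required.
single_llama = create_llama(models_to_test[0])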
2 changes: 1 addition & 1 deletion tests/test_functional/test_ainvoke.py
@@ -19,7 +19,7 @@ class TestAInvoke:
         params=models_to_test, ids=[config["repo_id"] for config in models_to_test]
     )
     def llama(self, request) -> Llama:
-        return create_llama(request)
+        return create_llama(request.param)

     @pytest.fixture
     def instance(self, llama):
2 changes: 1 addition & 1 deletion tests/test_functional/test_astream.py
@@ -12,7 +12,7 @@ class TestAStream:
         params=models_to_test, ids=[config["repo_id"] for config in models_to_test]
     )
     def llama(self, request) -> Llama:
-        return create_llama(request)
+        return create_llama(request.param)

     @pytest.fixture
     def instance(self, llama):
2 changes: 1 addition & 1 deletion tests/test_functional/test_invoke.py
@@ -20,7 +20,7 @@ class TestInvoke:
         params=models_to_test, ids=[config["repo_id"] for config in models_to_test]
     )
     def llama(self, request) -> Llama:
-        return create_llama(request)
+        return create_llama(request.param)

     @pytest.fixture
     def instance(self, llama):
20 changes: 20 additions & 0 deletions tests/test_functional/test_llm_type.py
@@ -0,0 +1,20 @@
+from llama_cpp import Llama
+import pytest
+from langchain_llamacpp_chat_model import LlamaChatModel
+from tests.test_functional.models_configuration import create_llama, models_to_test
+
+
+class TestInvoke:
+
+    @pytest.fixture()
+    def llama(self) -> Llama:
+
+        return create_llama(models_to_test[0])
+
+    @pytest.fixture
+    def instance(self, llama):
+        return LlamaChatModel(llama=llama)
+
+    def test_llm_type(self, instance: LlamaChatModel):
+        result = instance._llm_type
+        assert models_to_test[0]["repo_id"] in result
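
The final assertion relies on _model_local_path resolving each config to a local file path that embeds that config's repo_id, since _llm_type now returns the model path. A hypothetical models_to_test entry illustrating the shape the test assumes (only the repo_id key is confirmed by the diffs above; filename is an illustrative guess):

# Hypothetical entry; "repo_id" appears in the fixtures above,
# "filename" is an assumption for illustration.
example_config = {
    "repo_id": "TheBloke/phi-2-GGUF",
    "filename": "phi-2.Q4_K_M.gguf",
}
# The resolved local path would then contain the repo_id, e.g.
# ".../TheBloke/phi-2-GGUF/phi-2.Q4_K_M.gguf", so the substring
# check in test_llm_type passes.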
2 changes: 1 addition & 1 deletion tests/test_functional/test_stream.py
@@ -12,7 +12,7 @@ class TestStream:
         params=models_to_test, ids=[config["repo_id"] for config in models_to_test]
     )
     def llama(self, request) -> Llama:
-        return create_llama(request)
+        return create_llama(request.param)

     @pytest.fixture
     def instance(self, llama):
