Skip to content

Commit

Permalink
removal of token.
Browse files Browse the repository at this point in the history
  • Loading branch information
mfmezger committed Jun 2, 2024
1 parent 1328380 commit c7a9384
Show file tree
Hide file tree
Showing 7 changed files with 10 additions and 56 deletions.
23 changes: 4 additions & 19 deletions agent/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@
from agent.utils.utility import (
combine_text_from_list,
create_tmp_folder,
validate_token,
)
from agent.utils.vdb import initialize_all_vector_dbs, load_vec_db_conn

Expand Down Expand Up @@ -116,10 +115,9 @@ async def post_embedd_documents(llm_backend: LLMBackend, files: list[UploadFile]
"""
logger.info("Embedding Multiple Documents")

token = validate_token(token=llm_backend.token, llm_backend=llm_backend, aleph_alpha_key=ALEPH_ALPHA_API_KEY, openai_key=OPENAI_API_KEY)
tmp_dir = create_tmp_folder()

service = LLMContext(LLMStrategyFactory.get_strategy(strategy_type=LLMProvider.ALEPH_ALPHA, token=token, collection_name=llm_backend.collection_name))
service = LLMContext(LLMStrategyFactory.get_strategy(strategy_type=LLMProvider.ALEPH_ALPHA, collection_name=llm_backend.collection_name))

file_names = []

Expand Down Expand Up @@ -164,10 +162,7 @@ async def embedd_text(embedding: EmbeddTextRequest, llm_backend: LLMBackend) ->
"""
logger.info("Embedding Text")

# TODO: REWORK THE TOKEN
token = validate_token(token=llm_backend.token, llm_backend=llm_backend, aleph_alpha_key=ALEPH_ALPHA_API_KEY, openai_key=OPENAI_API_KEY)

service = LLMContext(LLMStrategyFactory.get_strategy(strategy_type=llm_backend.llm_provider, token=token, collection_name=llm_backend.collection_name))
service = LLMContext(LLMStrategyFactory.get_strategy(strategy_type=llm_backend.llm_provider, collection_name=llm_backend.collection_name))

# save the string to a txt file in a uuid directory
tmp_dir = create_tmp_folder()
Expand Down Expand Up @@ -243,9 +238,7 @@ def question_answer(rag: RAGRequest, llm_backend: LLMBackend) -> QAResponse:
msg = "Please provide a Question."
raise ValueError(msg)

token = validate_token(token=llm_backend.token, llm_backend=llm_backend, aleph_alpha_key=ALEPH_ALPHA_API_KEY, openai_key=OPENAI_API_KEY)

service = LLMContext(LLMStrategyFactory.get_strategy(strategy_type=llm_backend.llm_provider, token=token, collection_name=llm_backend.collection_name))
service = LLMContext(LLMStrategyFactory.get_strategy(strategy_type=llm_backend.llm_provider, collection_name=llm_backend.collection_name))
# summarize the history
if rag.history:
# combine the texts
Expand Down Expand Up @@ -284,13 +277,6 @@ def explain_question_answer(explain_request: ExplainQARequest, llm_backend: LLMB
msg = "Please provide a Question."
raise ValueError(msg)

explain_request.rag_request.search.llm_backend.token = validate_token(
token=explain_request.rag_request.search.llm_backend.token,
llm_backend=explain_request.rag_request.search.llm_backend.llm_provider,
aleph_alpha_key=ALEPH_ALPHA_API_KEY,
openai_key=OPENAI_API_KEY,
)

service = LLMContext(
LLMStrategyFactory.get_strategy(strategy_type=llm_backend.llm_provider, token=search.llm_backend.token, collection_name=llm_backend.collection_name)
)
Expand Down Expand Up @@ -324,7 +310,6 @@ def explain_question_answer(explain_request: ExplainQARequest, llm_backend: LLMB
# JSONResponse: _description_
# """
# logger.info("Processing Document")
# token = validate_token(token=token, llm_backend=llm_backend, aleph_alpha_key=ALEPH_ALPHA_API_KEY, openai_key=OPENAI_API_KEY)

# # Create a temporary folder to save the files
# tmp_dir = create_tmp_folder()
Expand All @@ -347,7 +332,7 @@ def explain_question_answer(explain_request: ExplainQARequest, llm_backend: LLMB
# with Path(tmp_dir / file_name).open() as f:
# f.write(await file.read())

# process_documents_aleph_alpha(folder=tmp_dir, token=token, type=document_type)
# process_documents_aleph_alpha(folder=tmp_dir, type=document_type)

# logger.info(f"Found {len(documents)} documents.")
# return documents
Expand Down
2 changes: 1 addition & 1 deletion agent/backend/LLMStrategy.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ def get_strategy(strategy_type: str, token: str, collection_name: str) -> LLMBas
if strategy is None:
msg = "Unknown Strategy Type"
raise ValueError(msg)
return strategy(token=token, collection_name=collection_name)
return strategy(collection_name=collection_name)


class LLMContext:
Expand Down
2 changes: 1 addition & 1 deletion agent/backend/aleph_alpha_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -343,7 +343,7 @@ def process_documents_aleph_alpha(self, folder: str, processing_type: str) -> li
if __name__ == "__main__":
query = "Was ist Attention?"

aa_service = AlephAlphaService(collection_name="", token="")
aa_service = AlephAlphaService(collection_name="")

aa_service.embed_documents(directory="tests/resources/")

Expand Down
2 changes: 1 addition & 1 deletion agent/backend/cohere_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ def summarize_text(self, text: str) -> str:
if __name__ == "__main__":
query = "Was ist Attention?"

cohere_service = CohereService(collection_name="", token="")
cohere_service = CohereService(collection_name="")

cohere_service.embed_documents(directory="tests/resources/")

Expand Down
2 changes: 1 addition & 1 deletion agent/backend/gpt4all_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ def create_rag_chain(self, rag: RAGRequest, search: SearchParams) -> tuple:
if __name__ == "__main__":
query = "Was ist Attention?"

gpt4all_service = GPT4AllService(collection_name="", token="")
gpt4all_service = GPT4AllService(collection_name="")

gpt4all_service.embed_documents(directory="tests/resources/")

Expand Down
2 changes: 1 addition & 1 deletion agent/backend/open_ai_service.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
"""Script is used to initialize the Qdrant db backend with Azure OpenAI."""
"""Script is used to initialize the Qdrant db backend with (Azure) OpenAI."""
import os

import openai
Expand Down
33 changes: 1 addition & 32 deletions tests/unit_tests/test_utility.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
"""Tests for the utility functions."""
from agent.utils.utility import generate_prompt, set_token
from agent.utils.utility import generate_prompt


def test_generate_prompt() -> None:
Expand Down Expand Up @@ -33,34 +33,3 @@ def test_generate_prompt_detect_language_default_parameter() -> None:

def test_combine_text_from_list() -> None:
"""Test that combine_text_from_list returns the correct text."""


def test_validate_token() -> None:
"""Test that validate_token returns the correct token."""
token = set_token(token="example_token", llm_backend="openai", aleph_alpha_key="example_key_a", openai_key="example_key_o")

assert token == "example_token"

token = set_token(token="", llm_backend="aleph-alpha", aleph_alpha_key="example_key_a", openai_key="example_key_o")

assert token == "example_key_a"

token = set_token(token="", llm_backend="openai", aleph_alpha_key="example_key_a", openai_key="example_key_o")

assert token == "example_key_o"

token = set_token(token=None, llm_backend="openai", aleph_alpha_key="example_key_a", openai_key="example_key_o")

assert token == "example_key_o"

token = set_token(token="", llm_backend="gpt4all", aleph_alpha_key="example_key_a", openai_key="example_key_o")

assert token == "gpt4all"

from agent.data_model.request_data_model import LLMProvider

backend = LLMProvider.ALEPH_ALPHA

token = set_token(token="", llm_backend=backend, aleph_alpha_key="example_key_a", openai_key="example_key_o")

assert token == "example_key_a"

0 comments on commit c7a9384

Please sign in to comment.