From 9fe9e0a66783f7dbe3d91e5c17297e7373c07c88 Mon Sep 17 00:00:00 2001
From: Alexander Joham
Date: Sat, 19 Oct 2024 20:33:51 +0200
Subject: [PATCH] Update DTO for new Artemis table

---
 app/common/token_usage_dto.py                  | 8 ++++----
 app/llm/external/ollama.py                     | 2 +-
 app/llm/external/openai_chat.py                | 2 +-
 app/llm/langchain/iris_langchain_chat_model.py | 6 +++---
 4 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/app/common/token_usage_dto.py b/app/common/token_usage_dto.py
index 9579c831..a0ee3eda 100644
--- a/app/common/token_usage_dto.py
+++ b/app/common/token_usage_dto.py
@@ -4,12 +4,12 @@
 
 
 class TokenUsageDTO(BaseModel):
-    model_info: str = Field(alias="modelInfo", default="")
+    model_info: str = Field(alias="model", default="")
     num_input_tokens: int = Field(alias="numInputTokens", default=0)
-    cost_per_input_token: float = Field(alias="costPerInputToken", default=0)
+    cost_per_input_token: float = Field(alias="costPerMillionInputToken", default=0)
     num_output_tokens: int = Field(alias="numOutputTokens", default=0)
-    cost_per_output_token: float = Field(alias="costPerOutputToken", default=0)
-    pipeline: PipelineEnum = Field(default=PipelineEnum.NOT_SET)
+    cost_per_output_token: float = Field(alias="costPerMillionOutputToken", default=0)
+    pipeline: PipelineEnum = Field(alias="pipelineId", default=PipelineEnum.NOT_SET)
 
     def __str__(self):
         return (
diff --git a/app/llm/external/ollama.py b/app/llm/external/ollama.py
index 146ed82a..2cea702f 100644
--- a/app/llm/external/ollama.py
+++ b/app/llm/external/ollama.py
@@ -68,7 +68,7 @@ def convert_to_iris_message(
     tokens = TokenUsageDTO(
         numInputTokens=num_input_tokens,
         numOutputTokens=num_output_tokens,
-        modelInfo=model,
+        model=model,
     )
     return PyrisMessage(
         sender=map_str_to_role(message["role"]),
diff --git a/app/llm/external/openai_chat.py b/app/llm/external/openai_chat.py
index 005c2dd7..7a49f0c6 100644
--- a/app/llm/external/openai_chat.py
+++ b/app/llm/external/openai_chat.py
@@ -79,7 +79,7 @@ def convert_to_iris_message(
     num_output_tokens = getattr(usage, "completion_tokens", -1)
 
     tokens = TokenUsageDTO(
-        modelInfo=model,
+        model=model,
         numInputTokens=num_input_tokens,
         numOutputTokens=num_output_tokens,
     )
diff --git a/app/llm/langchain/iris_langchain_chat_model.py b/app/llm/langchain/iris_langchain_chat_model.py
index 94f41d5d..c8b1c6da 100644
--- a/app/llm/langchain/iris_langchain_chat_model.py
+++ b/app/llm/langchain/iris_langchain_chat_model.py
@@ -48,11 +48,11 @@ def _generate(
         base_message = convert_iris_message_to_langchain_message(iris_message)
         chat_generation = ChatGeneration(message=base_message)
         self.tokens = TokenUsageDTO(
-            modelInfo=iris_message.token_usage.model_info,
+            model=iris_message.token_usage.model_info,
             numInputTokens=iris_message.token_usage.num_input_tokens,
-            costPerInputToken=iris_message.token_usage.cost_per_input_token,
+            costPerMillionInputToken=iris_message.token_usage.cost_per_input_token,
             numOutputTokens=iris_message.token_usage.num_output_tokens,
-            costPerOutputToken=iris_message.token_usage.cost_per_output_token,
+            costPerMillionOutputToken=iris_message.token_usage.cost_per_output_token,
             pipeline=PipelineEnum.NOT_SET,
         )
         return ChatResult(generations=[chat_generation])
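
A minimal standalone sketch of what the renamed aliases change, assuming TokenUsageDTO stays a Pydantic v2 BaseModel and PipelineEnum is a str-based Enum exposing NOT_SET; the enum value string, the model name, and the token counts below are illustrative, not taken from the codebase:

# Sketch only: mirrors the updated field aliases from app/common/token_usage_dto.py.
from enum import Enum

from pydantic import BaseModel, ConfigDict, Field


class PipelineEnum(str, Enum):
    NOT_SET = "NOT_SET"  # placeholder value; the real enum lives in the Pyris codebase


class TokenUsageDTO(BaseModel):
    # Avoids the protected-namespace warning some Pydantic v2 versions emit for
    # field names starting with "model_".
    model_config = ConfigDict(protected_namespaces=())

    model_info: str = Field(alias="model", default="")
    num_input_tokens: int = Field(alias="numInputTokens", default=0)
    cost_per_input_token: float = Field(alias="costPerMillionInputToken", default=0)
    num_output_tokens: int = Field(alias="numOutputTokens", default=0)
    cost_per_output_token: float = Field(alias="costPerMillionOutputToken", default=0)
    pipeline: PipelineEnum = Field(alias="pipelineId", default=PipelineEnum.NOT_SET)


# Callers keep constructing by alias, as ollama.py and openai_chat.py do above
# ("model" instead of the old "modelInfo").
tokens = TokenUsageDTO(model="gpt-4o", numInputTokens=1200, numOutputTokens=350)

# Dumping by alias now yields the new Artemis column names, e.g.
# costPerMillionInputToken and pipelineId rather than costPerInputToken and pipeline.
print(tokens.model_dump(mode="json", by_alias=True))
# {'model': 'gpt-4o', 'numInputTokens': 1200, 'costPerMillionInputToken': 0,
#  'numOutputTokens': 350, 'costPerMillionOutputToken': 0, 'pipelineId': 'NOT_SET'}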