|
| 1 | +from typing import Optional |
| 2 | + |
| 3 | +from loguru import logger |
| 4 | +from pydantic import BaseModel, Field, SecretStr |
| 5 | + |
| 6 | +from llmkira.openai.cell import UserMessage |
| 7 | +from llmkira.openai.request import OpenAI, OpenAICredential |
| 8 | + |
| 9 | + |
class whether(BaseModel):
    """
    Decide whether to agree to the decision based on the content
    """

    # NOTE(review): this model is passed as `response_model` to OpenAI.extract,
    # so the class docstring and every Field description are emitted into the
    # function-calling JSON schema at runtime — treat them as behavior, not docs.
    # The model's True/False verdict for the evaluated condition.
    yes_no: bool = Field(description="Whether the condition is true or false")
    # Optional free-text rationale to surface back to the end user; defaults
    # to an empty string so callers can always read it safely.
    comment_to_user: Optional[str] = Field(
        default="", description="Comment on the decision"
    )
| 19 | + |
| 20 | + |
class continue_act(BaseModel):
    """
    Decide whether to continue execution based on circumstances
    """

    # NOTE(review): used as a structured-extraction `response_model`; the
    # docstring and Field descriptions become part of the runtime schema sent
    # to the LLM — do not reword them casually.
    # The model's decision on whether the caller should keep executing.
    continue_it: bool = Field(description="Whether to continue execution")
    # Optional free-text rationale to surface back to the end user.
    comment_to_user: Optional[str] = Field(
        default="", description="Comment on the decision"
    )
| 30 | + |
| 31 | + |
class LLMLogic(object):
    """
    Thin helper around the OpenAI client that asks the model to make
    boolean decisions via structured extraction into pydantic models.

    Both public methods are best-effort: any failure (network, parsing,
    credential) is logged and the caller-supplied default is returned
    instead of raising.
    """

    def __init__(self, api_endpoint: str, api_key: str, api_model: str):
        """
        :param api_endpoint: base URL of the OpenAI-compatible endpoint
        :param api_key: raw API key; wrapped in SecretStr per request
        :param api_model: model name used for every request
        """
        self.api_endpoint = api_endpoint
        self.api_key = api_key
        self.api_model = api_model

    def _credential(self) -> OpenAICredential:
        # Shared by llm_if/llm_continue; builds a fresh credential per call
        # (previously duplicated inline in both methods).
        return OpenAICredential(
            api_key=SecretStr(self.api_key),
            base_url=self.api_endpoint,
            model=self.api_model,
        )

    async def llm_if(self, context: str, condition: str, default: bool) -> whether:
        """
        Ask the model whether `condition` holds given `context`.

        :param context: situation description shown to the model
        :param condition: the condition to evaluate
        :param default: verdict to return if the request fails
        :return: a `whether` instance (falls back to `whether(yes_no=default)`)
        """
        # Fix: original prompt read "Condition{condition}" — missing the
        # colon present in "Context:{context}".
        message = f"Context:{context}\nCondition:{condition}\nPlease make a decision."
        try:
            logic_if: whether = await OpenAI(
                model=self.api_model, messages=[UserMessage(content=message)]
            ).extract(
                response_model=whether,
                session=self._credential(),
            )
            return logic_if
        except Exception as e:
            # Deliberate best-effort: swallow, log, and use the default.
            logger.error(f"llm_if error: {e}")
            return whether(yes_no=default)

    async def llm_continue(
        self, context: str, condition: str, default: bool
    ) -> continue_act:
        """
        Ask the model whether execution should continue given `context`.

        :param context: situation description shown to the model
        :param condition: the condition governing continuation
        :param default: decision to return if the request fails
        :return: a `continue_act` instance
                 (falls back to `continue_act(continue_it=default)`)
        """
        # Fix: same missing-colon prompt typo as in llm_if.
        message = (
            f"Context:{context}\nCondition:{condition}\n"
            f"Please make a decision whether to continue."
        )
        try:
            logic_continue: continue_act = await OpenAI(
                model=self.api_model, messages=[UserMessage(content=message)]
            ).extract(
                response_model=continue_act,
                session=self._credential(),
            )
            return logic_continue
        except Exception as e:
            # Deliberate best-effort: swallow, log, and use the default.
            logger.error(f"llm_continue error: {e}")
            return continue_act(continue_it=default)
0 commit comments