Commit 522d2b5

✨ feat(app): implement save_credential function
🚀 feat(components): add read_user_credential function
🔨 refactor(sender): replace login function with save_credential function
🔧 chore(setting): update database connection validation
1 parent a64ae20 commit 522d2b5

3 files changed: +95 −10 lines changed


llmkira/extra/plugins/search/__init__.py

+1 −3

@@ -105,7 +105,7 @@ async def failed(
     refer_llm_result: dict = None,
     **kwargs,
 ):
-    meta = task.task_sign.notify(
+    meta = task.task_sign.reply(
         plugin_name=__plugin_name__,
         tool_response=[
             ToolResponse(
@@ -115,8 +115,6 @@ async def failed(
                 tool_call=pending_task,
             )
         ],
-        memory_able=True,
-        response_snapshot=True,
     )
     await Task.create_and_send(
         queue_name=receiver.platform,

llmkira/logic/__init__.py

+79

@@ -0,0 +1,79 @@
+from typing import Optional
+
+from loguru import logger
+from pydantic import BaseModel, Field, SecretStr
+
+from llmkira.openai.cell import UserMessage
+from llmkira.openai.request import OpenAI, OpenAICredential
+
+
+class whether(BaseModel):
+    """
+    Decide whether to agree to the decision based on the content
+    """
+
+    yes_no: bool = Field(description="Whether the condition is true or false")
+    comment_to_user: Optional[str] = Field(
+        default="", description="Comment on the decision"
+    )
+
+
+class continue_act(BaseModel):
+    """
+    Decide whether to continue execution based on circumstances
+    """
+
+    continue_it: bool = Field(description="Whether to continue execution")
+    comment_to_user: Optional[str] = Field(
+        default="", description="Comment on the decision"
+    )
+
+
+class LLMLogic(object):
+    """
+    LLMLogic is a class that provides some basic logic operations.
+
+    """
+
+    def __init__(self, api_endpoint, api_key, api_model):
+        self.api_endpoint = api_endpoint
+        self.api_key = api_key
+        self.api_model = api_model
+
+    async def llm_if(self, context: str, condition: str, default: bool):
+        message = f"Context:{context}\nCondition:{condition}\nPlease make a decision."
+        try:
+            logic_if = await OpenAI(
+                model=self.api_model, messages=[UserMessage(content=message)]
+            ).extract(
+                response_model=whether,
+                session=OpenAICredential(
+                    api_key=SecretStr(self.api_key),
+                    base_url=self.api_endpoint,
+                    model=self.api_model,
+                ),
+            )
+            logic_if: whether
+            return logic_if
+        except Exception as e:
+            logger.error(f"llm_if error: {e}")
+            return whether(yes_no=default)
+
+    async def llm_continue(self, context: str, condition: str, default: bool):
+        message = f"Context:{context}\nCondition:{condition}\nPlease make a decision whether to continue."
+        try:
+            logic_continue = await OpenAI(
+                model=self.api_model, messages=[UserMessage(content=message)]
+            ).extract(
+                response_model=continue_act,
+                session=OpenAICredential(
+                    api_key=SecretStr(self.api_key),
+                    base_url=self.api_endpoint,
+                    model=self.api_model,
+                ),
+            )
+            logic_continue: continue_act
+            return logic_continue
+        except Exception as e:
+            logger.error(f"llm_continue error: {e}")
+            return continue_act(continue_it=default)
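
As a usage note (not part of the commit): a minimal sketch of how the new LLMLogic helper might be driven from async code. The endpoint, API key, and model name below are placeholders; the import path follows the new llmkira/logic/__init__.py module.

import asyncio

from llmkira.logic import LLMLogic


async def main():
    logic = LLMLogic(
        api_endpoint="https://api.openai.com/v1",  # placeholder endpoint
        api_key="sk-...",  # placeholder key
        api_model="gpt-4o-mini",  # placeholder model name
    )
    # llm_if returns a `whether` instance; on any request failure it
    # falls back to whether(yes_no=default) instead of raising.
    verdict = await logic.llm_if(
        context="The user asked to delete all records.",
        condition="Is this action destructive?",
        default=True,
    )
    print(verdict.yes_no, verdict.comment_to_user)


asyncio.run(main())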

llmkira/openai/request.py

+15 −7

@@ -225,17 +225,25 @@ async def request(self, session: OpenAICredential) -> OpenAIResult:
 
     @retry(stop=stop_after_attempt(3), reraise=True)
     async def extract(
-        self, response_model: Union[Type[BaseModel], Tool], session: OpenAICredential
+        self, response_model: Union[Type[BaseModel]], session: OpenAICredential
     ):
+        """
+        Extract the result from the response
+        :param response_model: BaseModel
+        :param session: OpenAICredential
+        :return: BaseModel
+        :raises NetworkError, UnexpectedFormatError, RuntimeError: The response model is not matched with the result
+        """
         self.n = 1
         self.response_format = None
-        if not isinstance(response_model, Tool):
-            response_model = Tool(function=response_model)
-        self.tools = [response_model]
-        self.tool_choice = ToolChoice(function=response_model.function)
+        tool = Tool(function=response_model)
+        self.tools = [tool]
+        self.tool_choice = ToolChoice(function=tool.function)
         result = await self.request(session)
         try:
             tool_call = ToolCall.model_validate(result.choices[0].message.tool_calls[0])
-            return response_model.model_validate(tool_call.function.arguments)
-        except Exception:
+            logger.debug(f"Extracted: {tool_call}")
+            return response_model.model_validate(tool_call.function.json_arguments)
+        except Exception as exc:
+            logger.error(f"extract:{exc}")
             raise RuntimeError("The response model is not matched with the result")
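
As a usage note (not part of the commit): after this change extract() accepts only a plain BaseModel subclass and wraps it in a Tool internally, so a caller sketch now looks like the following. The model name and credentials are placeholders.

import asyncio

from pydantic import BaseModel, Field, SecretStr

from llmkira.openai.cell import UserMessage
from llmkira.openai.request import OpenAI, OpenAICredential


class Sentiment(BaseModel):
    """Example response model; extract() builds the Tool internally."""

    positive: bool = Field(description="Whether the text reads as positive")


async def main():
    sentiment = await OpenAI(
        model="gpt-4o-mini",  # placeholder model name
        messages=[UserMessage(content="I love this library!")],
    ).extract(
        response_model=Sentiment,  # a BaseModel subclass, no longer a Tool
        session=OpenAICredential(
            api_key=SecretStr("sk-..."),  # placeholder key
            base_url="https://api.openai.com/v1",  # placeholder endpoint
            model="gpt-4o-mini",
        ),
    )
    print(sentiment.positive)


asyncio.run(main())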
