Commit b049286

Merge pull request #403 from LlmKira/dev
Logic Mock In Recursive function snapshot process

2 parents ac5fe4a + 522d2b5

File tree: 14 files changed (+305, -183 lines)
README.md (+5, -2)

@@ -78,10 +78,10 @@ or [one-api](https://github.com/songquanpeng/one-api) independently.
 
 ### 🍔 Login Modes
 
-- `Login via url`: Use `/login token$https://provider.com` to Login. The program posts the token to the interface to
+- `Login via url`: Use `/login <a token>$<something like https://provider.com/login>` to Login. The program posts the token to the interface to
   retrieve configuration
   information, [how to develop this](https://github.com/LlmKira/Openaibot/blob/81eddbff0f136697d5ad6e13ee1a7477b26624ed/app/components/credential.py#L20).
-- `Login`: Use `/login https://api.com/v1$key$model` to login
+- `Login`: Use `/login https://<api endpoint>/v1$<api key>$<the model>` to login
 
 ### 🧀 Plugin Can Do More
 
@@ -145,6 +145,9 @@ npm install pm2 -g
 pm2 start pm2.json
 ```
 
+> **Be sure to change the default password for the command, or disable open ports to prevent the database from being
+scanned and attacked.**
+
 ### 🥣 Docker
 
 Build Hub: [sudoskys/llmbot](https://hub.docker.com/repository/docker/sudoskys/llmbot/general)
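As a quick illustration of the two `/login` argument layouts documented above, here is a minimal sketch of how the `$`-delimited strings differ. The token, endpoint, key, and model values are placeholders; the bot's real parsing lives in its sender helpers and may differ.

```python
# Minimal sketch of the two /login argument layouts (values are placeholders).
from urllib.parse import urlparse


def looks_like_url(value: str) -> bool:
    """Rough check that a segment is an absolute URL."""
    parsed = urlparse(value)
    return bool(parsed.scheme and parsed.netloc)


# Login via url: "<token>$<provider login endpoint>"
provider_arg = "my-token$https://provider.com/login"
token, provider_url = provider_arg.split("$", maxsplit=1)
assert not looks_like_url(token) and looks_like_url(provider_url)

# Direct login: "<api endpoint>/v1$<api key>$<model>"
direct_arg = "https://api.example.com/v1$sk-placeholder$gpt-4o-mini"
api_endpoint, api_key, api_model = direct_arg.split("$", maxsplit=2)
assert looks_like_url(api_endpoint)
```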

app/components/__init__.py (+9)

@@ -0,0 +1,9 @@
+from typing import Optional
+
+from app.components.credential import Credential
+from app.components.user_manager import USER_MANAGER
+
+
+async def read_user_credential(user_id: str) -> Optional[Credential]:
+    user = await USER_MANAGER.read(user_id=user_id)
+    return user.credential
app/components/credential.py (+2, -29)

@@ -1,5 +1,4 @@
 import os
-from urllib.parse import urlparse
 
 import requests
 from dotenv import load_dotenv
@@ -15,6 +14,7 @@ class Credential(BaseModel):
     api_key: str
     api_endpoint: str
     api_model: str
+    api_tool_model: str = "gpt-3.5-turbo"
 
     @classmethod
     def from_provider(cls, token, provider_url):
@@ -36,37 +36,10 @@ def from_provider(cls, token, provider_url):
             api_key=user_data["api_key"],
             api_endpoint=user_data["api_endpoint"],
             api_model=user_data["api_model"],
+            api_tool_model=user_data.get("api_tool_model", "gpt-3.5-turbo"),
         )
 
 
-def split_setting_string(input_string):
-    if not isinstance(input_string, str):
-        return None
-    segments = input_string.split("$")
-
-    # Check that the link is valid
-    def is_valid_url(url):
-        try:
-            result = urlparse(url)
-            return all([result.scheme, result.netloc])
-        except ValueError:
-            return False
-
-    # Case: the string starts with a link
-    if is_valid_url(segments[0]) and len(segments) >= 3:
-        return segments[:3]
-    # Case: the second element is a link and the first is a plain string
-    elif (
-        len(segments) == 2
-        and not is_valid_url(segments[0])
-        and is_valid_url(segments[1])
-    ):
-        return segments
-    # Anything else
-    else:
-        return None
-
-
 load_dotenv()
 
 if os.getenv("GLOBAL_OAI_KEY") and os.getenv("GLOBAL_OAI_ENDPOINT"):
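The practical effect of this change is that `api_tool_model` is optional everywhere a `Credential` is built: it defaults to "gpt-3.5-turbo" both on direct construction and when a provider payload omits the field. A hedged usage sketch, meant to run inside the project (the endpoint, key, and model values are placeholders):

```python
# Sketch: constructing the updated Credential (run inside the project; values are placeholders).
from app.components.credential import Credential

# Direct construction: api_tool_model falls back to "gpt-3.5-turbo" when omitted.
credential = Credential(
    api_key="sk-placeholder",
    api_endpoint="https://api.example.com/v1",
    api_model="gpt-4o-mini",
)
assert credential.api_tool_model == "gpt-3.5-turbo"

# Provider flow: from_provider() now forwards api_tool_model if the provider
# returns it, otherwise the same default applies.
# credential = Credential.from_provider(token="<token>", provider_url="https://provider.com/login")
```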

app/receiver/function.py (+67, -18)

@@ -14,8 +14,12 @@
 from aio_pika.abc import AbstractIncomingMessage
 from loguru import logger
 
+from app.components import read_user_credential
+from app.components.credential import global_credential
 from llmkira.kv_manager.env import EnvManager
 from llmkira.kv_manager.tool_call import GLOBAL_TOOLCALL_CACHE_HANDLER
+from llmkira.logic import LLMLogic
+from llmkira.memory import global_message_runtime
 from llmkira.openai.cell import ToolCall
 from llmkira.sdk.tools.register import ToolRegister
 from llmkira.task import Task, TaskHeader
@@ -235,20 +239,7 @@ async def run_pending_task(task: TaskHeader, pending_task: ToolCall):
            return logger.info(
                f"[Snapshot Auth] \n--auth-require {pending_task.name} require."
            )
-
-        # Resign Chain
-        # Timed gate to prevent over-registration
-        if len(task.task_sign.tool_calls_pending) == 1:
-            if not has_been_called_recently(userid=task.receiver.uid, n_seconds=5):
-                logger.debug(
-                    "ToolCall run out, resign a new request to request stop sign."
-                )
-                await create_child_snapshot(
-                    task=task,
-                    memory_able=True,
-                    channel=task.receiver.platform,
-                )
-        # Run the function, passing the model info and the raw openai info of the previous result
+        # Run Function
        run_result = await _tool_obj.load(
            task=task,
            receiver=task.receiver,
@@ -257,11 +248,72 @@ async def run_pending_task(task: TaskHeader, pending_task: ToolCall):
            pending_task=pending_task,
            refer_llm_result=task.task_sign.llm_response,
        )
+        run_status = True
        # Update task status
+        if run_result.get("exception"):
+            run_status = False
        await task.task_sign.complete_task(
            tool_calls=pending_task, success_or_not=True, run_result=run_result
        )
-        return run_result
+        # Resign Chain
+        # Timed gate to prevent over-registration
+        if len(task.task_sign.tool_calls_pending) == 0:
+            if not has_been_called_recently(userid=task.receiver.uid, n_seconds=3):
+                credentials = await read_user_credential(user_id=task.receiver.uid)
+                if global_credential:
+                    credentials = global_credential
+                logic = LLMLogic(
+                    api_key=credentials.api_key,
+                    api_endpoint=credentials.api_endpoint,
+                    api_model=credentials.api_tool_model,
+                )
+                history = await global_message_runtime.update_session(
+                    session_id=task.receiver.uid,
+                ).read(lines=3)
+                logger.debug(f"Read History:{history}")
+                continue_ = await logic.llm_continue(
+                    context=f"History:{history},ToolCallResult:{run_status}",
+                    condition="Would you like to continue a chat?",
+                    default=False,
+                )
+                if continue_.continue_it:
+                    logger.debug(
+                        "ToolCall run out, resign a new request to request stop sign."
+                    )
+                    await create_child_snapshot(
+                        task=task,
+                        memory_able=True,
+                        channel=task.receiver.platform,
+                    )
+                    # Run the function, passing the model info and the raw openai info of the previous result
+                    await Task.create_and_send(
+                        queue_name=task.receiver.platform,
+                        task=TaskHeader(
+                            sender=task.sender,
+                            receiver=task.receiver,
+                            task_sign=task.task_sign.notify(
+                                plugin_name=__receiver__,
+                                response_snapshot=True,
+                                memory_able=False,
+                            ),
+                            message=[
+                                EventMessage(
+                                    user_id=task.receiver.user_id,
+                                    chat_id=task.receiver.chat_id,
+                                    text=continue_.comment_to_user,
+                                )
+                            ],
+                        ),
+                    )
+                else:
+                    if continue_.comment_to_user:
+                        await reply_user(
+                            platform=task.receiver.platform,
+                            receiver=task.receiver,
+                            task=task,
+                            text=continue_.comment_to_user,
+                        )
+        return run_status
 
     async def process_function_call(self, message: AbstractIncomingMessage):
         """
@@ -307,9 +359,6 @@ async def run_task(self, task, pending_task):
        try:
            await self.run_pending_task(task=task, pending_task=pending_task)
        except Exception as e:
-            await task.task_sign.complete_task(
-                tool_calls=pending_task, success_or_not=False, run_result=str(e)
-            )
            logger.error(f"Function Call Error {e}")
            raise e
        finally:
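The new tail of `run_pending_task` is essentially a small decision procedure: once the pending tool-call list is empty and the user has not been pinged recently, an `LLMLogic` client (built from the user's credential, or the global one) looks at recent history plus the tool result and decides whether to resign a child snapshot or just reply. A condensed, hedged sketch of that flow follows; `ContinueDecision`, `decide_next_step`, `after_last_tool_call`, and `send_task` are illustrative names, while the `llm_continue` call signature and the `.continue_it` / `.comment_to_user` fields come from the diff above.

```python
# Condensed sketch of the "continue or stop" tail of run_pending_task.
# Assumption: logic.llm_continue returns an object exposing .continue_it and
# .comment_to_user (as used in the diff); everything else here is stubbed.
from dataclasses import dataclass


@dataclass
class ContinueDecision:
    continue_it: bool
    comment_to_user: str = ""


async def decide_next_step(logic, history: str, run_status: bool) -> ContinueDecision:
    """Ask the tool model whether the snapshot chain should keep going."""
    return await logic.llm_continue(
        context=f"History:{history},ToolCallResult:{run_status}",
        condition="Would you like to continue a chat?",
        default=False,
    )


async def after_last_tool_call(task, logic, history, run_status,
                               create_child_snapshot, send_task, reply_user):
    decision = await decide_next_step(logic, history, run_status)
    if decision.continue_it:
        # Resign the chain: snapshot the task and push a follow-up request.
        await create_child_snapshot(task=task, memory_able=True,
                                    channel=task.receiver.platform)
        await send_task(task, text=decision.comment_to_user)
    elif decision.comment_to_user:
        # No follow-up: surface the model's closing comment to the user.
        await reply_user(platform=task.receiver.platform, receiver=task.receiver,
                         task=task, text=decision.comment_to_user)
```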

app/receiver/receiver_client.py (+2, -7)

@@ -17,8 +17,8 @@
 from loguru import logger
 from telebot import formatting
 
-from app.components.credential import Credential, global_credential
-from app.components.user_manager import USER_MANAGER
+from app.components import read_user_credential
+from app.components.credential import global_credential
 from app.middleware.llm_task import OpenaiMiddleware
 from llmkira.kv_manager.env import EnvManager
 from llmkira.openai import OpenaiError
@@ -48,11 +48,6 @@ async def get(self, user_id):
 user_locks = UserLocks()
 
 
-async def read_user_credential(user_id: str) -> Optional[Credential]:
-    user = await USER_MANAGER.read(user_id=user_id)
-    return user.credential
-
-
 async def generate_authorization(
     secrets: Dict, tool_invocation: ToolCall
 ) -> Tuple[dict, list, bool]:

app/sender/discord/__init__.py (+17, -9)

@@ -30,7 +30,13 @@
 __sender__ = "discord_hikari"
 __default_disable_tool_action__ = False
 
-from ..util_func import auth_reloader, is_command, is_empty_command, uid_make, login
+from ..util_func import (
+    auth_reloader,
+    is_command,
+    is_empty_command,
+    uid_make,
+    save_credential,
+)
 from llmkira.openapi.trigger import get_trigger_loop
 from ...components.credential import Credential, ProviderError
 
@@ -238,7 +244,7 @@ async def listen_login_url_command(
        credential = Credential.from_provider(
            token=token, provider_url=provider_url
        )
-        await login(
+        await save_credential(
            uid=uid_make(__sender__, ctx.user.id),
            credential=credential,
        )
@@ -264,17 +270,19 @@ async def listen_login_url_command(
 )
 async def listen_endpoint_command(
     ctx: crescent.Context,
-    openai_endpoint: str,
-    openai_key: str,
-    openai_model: str,
+    api_endpoint: str,
+    api_key: str,
+    api_model: str,
+    api_tool_model: str = "gpt-3.5-turbo",
 ):
     try:
        credential = Credential(
-            api_endpoint=openai_endpoint,
-            api_key=openai_key,
-            api_model=openai_model,
+            api_endpoint=api_endpoint,
+            api_key=api_key,
+            api_model=api_model,
+            api_tool_model=api_tool_model,
        )
-        await login(
+        await save_credential(
            uid=uid_make(__sender__, ctx.user.id),
            credential=credential,
        )

app/sender/kook/__init__.py (+17, -9)

@@ -27,7 +27,13 @@
 __sender__ = "kook"
 __default_disable_tool_action__ = False
 
-from ..util_func import auth_reloader, is_command, is_empty_command, uid_make, login
+from ..util_func import (
+    auth_reloader,
+    is_command,
+    is_empty_command,
+    uid_make,
+    save_credential,
+)
 from llmkira.openapi.trigger import get_trigger_loop
 from ...components.credential import ProviderError, Credential
 
@@ -244,7 +250,7 @@ async def listen_login_url_command(
        credential = Credential.from_provider(
            token=token, provider_url=provider_url
        )
-        await login(
+        await save_credential(
            uid=uid_make(__sender__, msg.author_id),
            credential=credential,
        )
@@ -272,17 +278,19 @@ async def listen_login_url_command(
 @bot.command(name="login")
 async def listen_login_command(
     msg: Message,
-    openai_endpoint: str,
-    openai_key: str,
-    openai_model: str,
+    api_endpoint: str,
+    api_key: str,
+    api_model: str = "gpt-3.5-turbo",
+    api_tool_model: str = "gpt-3.5-turbo",
 ):
     try:
        credential = Credential(
-            api_endpoint=openai_endpoint,
-            api_key=openai_key,
-            api_model=openai_model,
+            api_endpoint=api_endpoint,
+            api_key=api_key,
+            api_model=api_model,
+            api_tool_model=api_tool_model,
        )
-        await login(
+        await save_credential(
            uid=uid_make(__sender__, msg.author_id),
            credential=credential,
        )

app/sender/slack/__init__.py (+4, -42)

@@ -39,8 +39,6 @@
 
 __sender__ = "slack"
 
-from ...components.credential import split_setting_string, Credential, ProviderError
-
 SlackTask = Task(queue=__sender__)
 __default_disable_tool_action__ = False
 __join_cache__ = {}
@@ -232,46 +230,10 @@ async def listen_login_command(ack: AsyncAck, respond: AsyncRespond, command):
        if not command.text:
            return
        _arg = command.text
-        settings = split_setting_string(_arg)
-        if not settings:
-            return await respond(
-                text=convert(
-                    "🔑 **Incorrect format.**\n"
-                    "You can set it via `https://api.com/v1$key$model` format, "
-                    "or you can log in via URL using `token$https://provider.com`."
-                ),
-            )
-        if len(settings) == 2:
-            try:
-                credential = Credential.from_provider(
-                    token=settings[0], provider_url=settings[1]
-                )
-            except ProviderError as e:
-                return await respond(text=f"Login failed, website return {e}")
-            except Exception as e:
-                logger.error(f"Login failed {e}")
-                return await respond(text=f"Login failed, because {type(e)}")
-            else:
-                await login(
-                    uid=uid_make(__sender__, command.user_id),
-                    credential=credential,
-                )
-                return await respond(
-                    text="Login success as provider! Welcome master!"
-                )
-        elif len(settings) == 3:
-            credential = Credential(
-                api_endpoint=settings[0], api_key=settings[1], api_model=settings[2]
-            )
-            await login(
-                uid=uid_make(__sender__, command.user_id),
-                credential=credential,
-            )
-            return await respond(
-                text=f"Login success as {settings[2]}! Welcome master! "
-            )
-        else:
-            return logger.trace(f"Login failed {settings}")
+        reply = await login(
+            uid=uid_make(__sender__, command.user_id), arg_string=_arg
+        )
+        return await respond(text=reply)
 
    @bot.command(command="/env")
    async def listen_env_command(ack: AsyncAck, respond: AsyncRespond, command):
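The Slack handler now defers parsing to a shared `login(uid, arg_string)` helper in `..util_func` (the same module the Discord and Kook senders import `save_credential` from). That helper is not part of this diff; purely as a rough idea of its contract, here is a hypothetical sketch that reproduces the removed branching and returns the reply text. The import path for `save_credential` and the reply wording are assumptions based on the removed code, not the repository's actual implementation.

```python
# Hypothetical sketch of a util_func-style login(uid, arg_string) helper.
# Assumption: the real helper may differ; Credential/ProviderError come from the
# module shown elsewhere in this diff, save_credential's location is assumed.
from app.components.credential import Credential, ProviderError
from app.sender.util_func import save_credential  # assumed location


async def login(uid: str, arg_string: str) -> str:
    """Parse '<token>$<provider url>' or '<endpoint>$<key>$<model>', save it, return a reply."""
    segments = arg_string.split("$")
    try:
        if len(segments) == 2:
            # Provider mode: token + provider login endpoint.
            credential = Credential.from_provider(
                token=segments[0], provider_url=segments[1]
            )
        elif len(segments) >= 3:
            # Direct mode: endpoint + key + model.
            credential = Credential(
                api_endpoint=segments[0], api_key=segments[1], api_model=segments[2]
            )
        else:
            return (
                "🔑 Incorrect format. "
                "Use `https://<endpoint>/v1$<key>$<model>` or `<token>$<provider url>`."
            )
    except ProviderError as e:
        return f"Login failed, website return {e}"
    await save_credential(uid=uid, credential=credential)
    return "Login success! Welcome master!"
```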
