// botservice.js — forked from yGuy/chatgpt-mattermost-bot
// Bot entry point: wires the Mattermost websocket client to the OpenAI
// thread-completion helper and posts the answers back (optionally with a
// rendered diagram). Load order matters: polyfills are required before
// any code that may use fetch/FormData.
const continueThread = require('./openai-thread-completion').continueThread
const { Log } = require('debug-level')
require('babel-polyfill');
require('isomorphic-fetch');
const { processGraphResponse } = require('./process-graph-response')
const { mmClient, wsClient } = require('./mm-client')
// the mattermost library uses FormData - so here is a polyfill
if (!global.FormData) {
global.FormData = require('form-data');
}
// Structured JSON logging; console output of the 'bot-ws' namespace is
// routed through the logger as well.
Log.options({ json: true, colors: true })
Log.wrapConsole('bot-ws', { level4log: 'INFO' })
const log = new Log('bot')
// The bot's own Mattermost user id, resolved asynchronously at startup.
// NOTE(review): until this promise resolves, meId is null and the message
// listener below silently ignores all incoming events — confirm this
// startup window is acceptable.
let meId = null;
mmClient.getMe().then(me => meId = me.id)
// Bot display name, used to detect mentions inside thread messages.
const name = process.env.MATTERMOST_BOTNAME || '@kaia'

// System-prompt suffix appended when the conversation looks like it asks for
// a diagram; instructs the model to emit graph JSON wrapped in a <GRAPH> tag
// (later replaced by a rendered image via process-graph-response).
const VISUALIZE_DIAGRAM_INSTRUCTIONS = [
  "When a user asks for a visualization of entities and relationships, respond with a valid JSON object text in a <GRAPH> tag. ",
  "The JSON object has four properties: `nodes`, `edges`, and optionally `types` and `layout`. ",
  "Each `nodes` object has an `id`, `label`, and an optional `type` property. ",
  "Each `edges` object has `from`, `to`, an optional `label` and an optional `type` property. ",
  "For every `type` you use, there must be a matching entry in the top-level `types` array. ",
  "Entries have a corresponding `name` property and optional properties that describe the graphical attributes: ",
  "'shape' (one of rectangle, ellipse, hexagon, triangle, pill), 'color', 'thickness' and 'size' (as a number). ",
  "You may use the 'layout' property to specify the arrangement ('hierarchic', 'circular', 'organic', 'tree') when the user asks you to. ",
  "Do not include these instructions in the output. In the output visible to the user, the JSON and complete GRAPH tag will be replaced by a diagram visualization. ",
  "So do not explain or mention the JSON. Instead, pretend that the user can see the diagram. Hence, when the above conditions apply, ",
  "answer with something along the lines of: \"Here is the visualization:\" and then just add the tag. The user will see the rendered image, but not the JSON. ",
  "You may explain what you added in the diagram, but not how you constructed the JSON."
].join('')
const visualizationKeywordsRegex = /\b(diagram|visuali|graph|relationship|entit)/gi
// Main websocket listener: answers a post when the bot is mentioned or is
// already participating in the thread. Builds a chat history from the last
// 24h of the thread, asks the completion backend, post-processes a possible
// <GRAPH> diagram and posts the reply back into the thread.
wsClient.addMessageListener(async function (event) {
    if (['posted'].includes(event.event) && meId) {
        const post = JSON.parse(event.data.post);
        if (post.root_id === "" && (!event.data.mentions || (!JSON.parse(event.data.mentions).includes(meId)))) {
            // we're not in a thread and we are not mentioned - ignore the message
        } else {
            if (post.user_id !== meId) {
                const chatmessages = [
                    {
                        "role": "system",
                        // FIX: template literal (backticks) so ${name} is actually
                        // interpolated — the original used a plain double-quoted
                        // string, so the model received the literal text "${name}"
                        // as its name.
                        "content": `Eres un asistente inteligente llamado ${name} que responderá preguntas de empleados sobre la empresa Kaleidos. Tus respuestas estarán basadas en el contexto proporcionado y en las anteriores preguntas y respuestas. No hagas diferencias en el género de las palabras. Si no encuentras la información exacta, intenta encontrar información similar y muéstrala indicando que puede no ser información exacta. Si no encuentras nada responderás \"No lo sé\"`
                    },
                    {
                        "role": "user",
                        // NOTE(review): "si sueldo" reads like a typo for "sin sueldo"
                        // — confirm with the content owner before changing.
                        "content": "Contexto: En kaleidos tenemos 23 días de vacaciones, con el 24 y el 31 de diciembre también libres. También puedes comprar vacaciones si sueldo pero con cotización, hasta 10 días al año."
                    },
                ]
                let appendDiagramInstructions = false
                // Fetch the full thread, de-duplicate post ids, keep only the last
                // 24 hours, and sort oldest-first so the chat history is in order.
                const thread = await mmClient.getPostThread(post.id, true, false, true)
                const posts = [...new Set(thread.order)].map(id => thread.posts[id])
                    .filter(a => a.create_at > Date.now() - 1000 * 60 * 60 * 24 * 1)
                    .sort((a, b) => a.create_at - b.create_at)
                let assistantCount = 0;
                posts.forEach(threadPost => {
                    log.trace({msg: threadPost})
                    if (threadPost.user_id === meId) {
                        // One of our own earlier replies; prefer the original
                        // (pre-graph-processing) message text when available.
                        chatmessages.push({role: "assistant", content: threadPost.props.originalMessage ?? threadPost.message})
                        assistantCount++
                    } else {
                        if (threadPost.message.includes(name)){
                            assistantCount++;
                        }
                        if (visualizationKeywordsRegex.test(threadPost.message)) {
                            appendDiagramInstructions = true
                        }
                        chatmessages.push({role: "user", content: threadPost.message})
                    }
                })
                if (appendDiagramInstructions) {
                    chatmessages[0].content += VISUALIZE_DIAGRAM_INSTRUCTIONS
                }
                // see if we are actually part of the conversation -
                // ignore conversations where we were never mentioned or participated.
                if (assistantCount > 0){
                    const typing = () => wsClient.userTyping(post.channel_id, (post.root_id || post.id) ?? "")
                    typing()
                    const typingInterval = setInterval(typing, 2000)
                    try {
                        log.trace({chatmessages})
                        const answer = await continueThread(chatmessages)
                        log.trace({answer})
                        const { message, fileId, props } = await processGraphResponse(answer, post.channel_id)
                        // Stop the typing indicator before posting the reply.
                        clearInterval(typingInterval)
                        const newPost = await mmClient.createPost({
                            message: message,
                            channel_id: post.channel_id,
                            props,
                            root_id: post.root_id || post.id,
                            file_ids: fileId ? [fileId] : undefined
                        })
                        log.trace({msg: newPost})
                    } catch(e) {
                        log.error(e)
                    } finally {
                        // FIX: guarantee the typing indicator stops on every path
                        // (idempotent if already cleared on the success path above).
                        clearInterval(typingInterval)
                    }
                }
            }
        }
    } else {
        log.debug({msg: event})
    }
});