-
Notifications
You must be signed in to change notification settings - Fork 110
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
1 parent
f5e77b3
commit 57dc0fc
Showing
1 changed file
with
148 additions
and
59 deletions.
There are no files selected for viewing
207 changes: 148 additions & 59 deletions
207
examples/ts/rag-over-your-life-in-obsidian/screenpipe.js
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,64 +1,153 @@ | ||
// LLM backend configuration. The defaults target OpenAI's hosted chat
// completions endpoint with gpt-4o.
const OPENAI_API_URL = 'https://api.openai.com/v1/chat/completions';
const OPENAI_MODEL = 'gpt-4o';
// To run fully local instead, comment out the two lines above and
// uncomment the ollama pair below (then start the model with
// "ollama run phi3:medium-128k"):
// const OPENAI_API_URL = 'http://localhost:11434/api/chat';
// const OPENAI_MODEL = 'phi3:medium-128k';

// Example usage from inside Obsidian: select a question such as
// "can you create a bullet list for me to share with my colleagues
// my changes in the code of screenpipe? Use the queries like 'lldb',
// 'gdp', 'discord'" and run this script on the selection.
/**
 * Obsidian user-script: RAG over your screen history via screenpipe.
 *
 * Pipeline:
 *   1. Ask the LLM to turn the current text selection into 3 screenpipe
 *      search queries (keyword + offset + limit, as JSON).
 *   2. Run those queries in parallel against the local screenpipe server.
 *   3. Ask the LLM to synthesize an answer from the search results.
 *
 * @param {{ openai: string }} conf - plugin config; `openai` is the API key
 *   (ignored by a local ollama endpoint but still sent as a Bearer token).
 * @returns {Promise<string>} the original selection followed by the model's
 *   answer, or the selection unchanged when the model returned no content.
 */
const screenpipe = async (conf) => {
  const openAIKey = conf.openai;
  document.body.style.cursor = "wait";
  const msg = window.getSelection().toString();

  try {
    // Step 1: generate parameters for 3 different screenpipe queries.
    const paramsResponse = await fetch(OPENAI_API_URL, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${openAIKey}`,
      },
      body: JSON.stringify({
        'response_format': { type: 'json_object' },
        'messages': [
          {
            'role': 'user',
            'content': `Based on this user selection: "${msg}", generate parameters as JSON for 3 different queries to screenpipe.
        Each query should have "q", "offset", and "limit" fields.
        Rules:
        - q should be a single keyword that would properly find in the text found on the user screen some infomation that would help answering the user question.
        Return a list of objects with the key "queries"
        - q contains a single query, again, for example instead of "life plan" just use "life"
        - Respond with only the updated JSON object
        - If you return something else than JSON the universe will come to an end
        - DO NOT add \`\`\`json at the beginning or end of your response
        - Do not use '"' around your response
        Example answers from you:
        "{
            "queries": [
                {"q": "goal", "offset": 0, "limit": 10},
                {"q": "stripe", "offset": 0, "limit": 50},
                {"q": "customer", "offset": 0, "limit": 20}
            ]
        }"
        or
        "{
            "queries": [
                {"q": "sales", "offset": 0, "limit": 10},
                {"q": "customer", "offset": 0, "limit": 20},
                {"q": "goal", "offset": 0, "limit": 10}
            ]
        }"
        Bad example
        "Here's the JSON you wanted:
        [
          {
            "queries": [{"q": "sales", "offset": 0, "limit": 10}]
          },
          {
            "queries": [{"q": "customer", "offset": 0, "limit": 20}]
          },
          {
            "queries": [{"q": "goal", "offset": 0, "limit": 10}]
          }
        ]"
        or
        "\`\`\`json
        [
          {
            "queries": [
              {"q": "goals", "offset": 0, "limit": 3}
            ]
          },
          {
            "queries": [
              {"q": "life plans", "offset": 0, "limit": 5}
            ]
          },
          {
            "queries": [
              {"q": "ambitions", "offset": 0, "limit": 3}
            ]
          }
        ]
        \`\`\`"
        JSON?
        `
          },
        ],
        'model': OPENAI_MODEL,
        'stream': false,
      })
    }).then(res => res.json());

    console.log(paramsResponse);

    // OpenAI returns the text under choices[0].message.content; ollama's
    // native API (not fully OpenAI-compatible) puts it under message.content.
    // phi3 models are drunk af thats why we also scrub quotes/fences below.
    const rawParams = paramsResponse.choices?.[0]?.message?.content ??
      paramsResponse.message?.content;
    // fix: previously `.trim()` was called on this value without a guard,
    // throwing a TypeError when both response shapes were missing.
    if (!rawParams) {
      new Notice('Error from OpenAI');
      new Notice(JSON.stringify(paramsResponse));
      return msg;
    }
    const { queries } = JSON.parse(rawParams.trim()
      // remove " at the start and end
      .replace(/^"|"$/g, "")
      // remove ```json fences at the start and end
      .replace(/^```json\n/, "")
      .replace(/\n```$/, "")
    );

    console.log("queries", queries);

    // Step 2: query screenpipe once per generated parameter set, in parallel.
    const screenpipeResults = await Promise.all(queries.map(async (query) => {
      const response = await fetch(`http://localhost:3030/search?q=${encodeURIComponent(query.q)}&offset=${query.offset}&limit=${query.limit}`);
      return response.json();
    }));

    console.log("screenpipeResults", screenpipeResults);

    // Step 3: ask the LLM to write an answer based on screenpipe results.
    const finalResponse = await fetch(OPENAI_API_URL, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${openAIKey}`,
      },
      body: JSON.stringify({
        'messages': [
          { 'role': 'user', 'content': `Based on the user question "${msg}" and these data which corresponds to what has been seen on the user screen or through his mic: ${JSON.stringify(screenpipeResults)}, provide a comprehensive answer to the user intent.` },
        ],
        'model': OPENAI_MODEL,
        'stream': false,
      })
    }).then(res => res.json());

    console.log(finalResponse);

    // fix: optional-chain `.trim()` — the original `content.trim()` threw a
    // TypeError whenever neither response shape carried content.
    const txtResponse = finalResponse.choices?.[0]?.message?.content ||
      finalResponse?.message?.content?.trim();
    if (!txtResponse) {
      new Notice('Error from OpenAI');
      new Notice(JSON.stringify(finalResponse));
      // fix: return the selection untouched instead of appending the
      // literal string "undefined" to the user's note.
      return msg;
    }

    return `${msg}${txtResponse}`;
  } finally {
    // fix: always restore the cursor, even when a fetch or JSON.parse
    // throws — previously it stayed stuck on "wait".
    document.body.style.cursor = "default";
  }
}

// Entry point consumed by the Obsidian user-script loader.
module.exports = screenpipe;