diff --git a/ai.js b/ai.js index 34983e8e..0a64fd1c 100644 --- a/ai.js +++ b/ai.js @@ -51,61 +51,85 @@ async function rebuildIndex() { globalLunrIndex = initLunrIndex(documents); } -async function getFirstAvailableModel() { +async function getFirstAvailableModel(exclude=null) { let ollamaendpoint = settings.ollamaendpoint?.textsetting || "http://localhost:11434"; - if (typeof ipcRenderer !== 'undefined') { - // Electron environment - return new Promise( async (resolve, reject) => { - let ccc = setTimeout(()=>{ - reject(new Error('Request timed out')); - },10000); - let xhr; - try { - xhr = await fetchNode(`${ollamaendpoint}/api/tags`); - } catch(e){ - clearTimeout(ccc); - reject(new Error('General fetch error')); - return; - } - - const datar = JSON.parse(xhr.data); - if (datar && datar.models && datar.models.length > 0) { - resolve(datar.models[0].name); - return; - } else { - reject(new Error('No models available')); - return; - } + const isLLMModel = (model) => { + const llmFamilies = ['llama', 'qwen2']; + return model.details?.families?.some(family => llmFamilies.includes(family)); + }; + + const getSizeInBillions = (model) => { + const sizeStr = model.details?.parameter_size; + return parseFloat(sizeStr?.replace('B', '')) || 0; + }; + + const findBestModel = (models) => { + const llmModels = models.filter(isLLMModel); + if (!llmModels.length) return models[0]?.name; + + const targetModels = llmModels.filter(m => { + const size = getSizeInBillions(m); + return size >= 2 && size <= 8; }); - - } else { - // Web environment - return new Promise((resolve, reject) => { - const xhr = new XMLHttpRequest(); - xhr.open('GET', `${ollamaendpoint}/api/tags`, true); - xhr.onload = function() { - if (xhr.status === 200) { - const datar = JSON.parse(xhr.responseText); - if (datar && datar.models && datar.models.length > 0) { - resolve(datar.models[0].name); - } else { - reject(new Error('No models available')); - } - } else { - reject(new Error('Failed to fetch models')); - } - }; - xhr.timeout = 10000; // 10 seconds timeout - xhr.ontimeout = function() { - reject(new Error('Request timed out')); - }; - xhr.onerror = function() { - reject(new Error('Network error while fetching models')); - }; - xhr.send(); + + if (targetModels.length) { + const model = targetModels[0].name !== exclude ? 
+ targetModels[0] : targetModels[1] || targetModels[0]; + return model.name; + } + + const sizes = llmModels.map(m => ({ + model: m, + size: getSizeInBillions(m), + diff: Math.min( + Math.abs(getSizeInBillions(m) - 2), + Math.abs(getSizeInBillions(m) - 8) + ) + })); + + const closest = sizes.sort((a, b) => a.diff - b.diff)[0]; + return closest.model.name; + }; + + if (typeof ipcRenderer !== 'undefined') { + return new Promise(async (resolve, reject) => { + let ccc = setTimeout(() => reject(new Error('Request timed out')), 10000); + try { + const xhr = await fetchNode(`${ollamaendpoint}/api/tags`); + clearTimeout(ccc); + const datar = JSON.parse(xhr.data); + if (!datar?.models?.length) throw new Error('No models available'); + resolve(findBestModel(datar.models)); + } catch(e) { + clearTimeout(ccc); + reject(e); + } }); } + + return new Promise((resolve, reject) => { + const xhr = new XMLHttpRequest(); + xhr.open('GET', `${ollamaendpoint}/api/tags`, true); + xhr.timeout = 10000; + + xhr.onload = function() { + if (xhr.status === 200) { + const datar = JSON.parse(xhr.responseText); + if (!datar?.models?.length) { + reject(new Error('No models available')); + return; + } + resolve(findBestModel(datar.models)); + } else { + reject(new Error('Failed to fetch models')); + } + }; + + xhr.ontimeout = () => reject(new Error('Request timed out')); + xhr.onerror = () => reject(new Error('Network error while fetching models')); + xhr.send(); + }); } @@ -207,9 +231,12 @@ async function callOllamaAPI(prompt, model = null, callback = null, abortControl let ollamamodel = model || settings.ollamamodel?.textsetting || tmpModelFallback || null; if (!ollamamodel) { - const availableModel0 = await getFirstAvailableModel(); - if (availableModel0) { - tmpModelFallback = availableModel0; + ollamamodel = await getFirstAvailableModel(); + if (ollamamodel) { + tmpModelFallback = ollamamodel; + setTimeout(() => { + tmpModelFallback = ""; + }, 60000); } else { console.error("No Ollama model found"); return; @@ -220,7 +247,7 @@ async function callOllamaAPI(prompt, model = null, callback = null, abortControl return result.response + "💥"; } else if (result.error && result.error === 404) { try { - const availableModel = await getFirstAvailableModel(); + const availableModel = await getFirstAvailableModel(ollamamodel); if (availableModel) { tmpModelFallback = availableModel; setTimeout(() => { @@ -353,7 +380,8 @@ async function callOllamaAPI(prompt, model = null, callback = null, abortControl const message = { model: currentModel, prompt: prompt, - stream: true + stream: true, + keep_alive: settings.ollamaKeepAlive ? parseInt(settings.ollamaKeepAlive.numbersetting)+"m" : "5m" }; if (images){ @@ -411,7 +439,8 @@ async function callOllamaAPI(prompt, model = null, callback = null, abortControl const message = { model: currentModel, prompt: prompt, - stream: isStreaming + stream: isStreaming, + keep_alive: settings.ollamaKeepAlive ? 
parseInt(settings.ollamaKeepAlive.numbersetting)+"m" : "5m" }; if (images){ @@ -838,8 +867,7 @@ async function processMessageWithOllama(data) { ollamaRateLimitPerTab = Math.max(0, parseInt(settings.ollamaRateLimitPerTab.numbersetting) || 0); } - if (data.type !== "stageten" && !settings.ollamaoverlayonly && data.tid && - lastResponseTime[data.tid] && (currentTime - lastResponseTime[data.tid] < ollamaRateLimitPerTab)) { + if (data.type !== "stageten" && !settings.ollamaoverlayonly && data.tid && lastResponseTime[data.tid] && (currentTime - lastResponseTime[data.tid] < ollamaRateLimitPerTab)) { isProcessing = false; return; } @@ -851,9 +879,7 @@ async function processMessageWithOllama(data) { } // Early return conditions - if ((data.type === "stageten" && botname === data.chatname) || - !data.chatmessage || - (!settings.noollamabotname && data.chatmessage.startsWith(botname + ":"))) { + if ((data.type === "stageten" && botname === data.chatname) || !data.chatmessage || (!settings.noollamabotname && data.chatmessage.startsWith(botname + ":"))) { isProcessing = false; return; } @@ -884,9 +910,10 @@ async function processMessageWithOllama(data) { } // Prevent self-replies - const score = levenshtein(cleanedText, lastSentMessage); - if (score < 7) { + const score = fastMessageSimilarity(cleanedText, lastSentMessage); + if (score > 0.5) { isProcessing = false; + console.log("RETURN", cleanedText, lastSentMessage); return; } @@ -895,14 +922,15 @@ async function processMessageWithOllama(data) { if (settings.ollamaprompt) { additionalInstructions = settings.ollamaprompt.textsetting; } - + //console.log(additionalInstructions, cleanedText, botname, data); const response = await processUserInput(cleanedText, data, additionalInstructions, botname); + //console.log(response); // Handle response - if (response && !response.toLowerCase().startsWith("not available") && + if (response && !response.toLowerCase().startsWith("not available") && (settings.alwaysRespondLLM || ( !response.includes("NO_RESPONSE") && !response.startsWith("No ") && - !response.startsWith("NO ")) { + !response.startsWith("NO ")))) { // Send to overlay if enabled sendTargetP2P({ @@ -978,11 +1006,19 @@ async function processUserInput(userInput, data, additionalInstructions, botname if (await isRAGConfigured()) { const databaseDescriptor = localStorage.getItem('databaseDescriptor') || ''; - const ragPrompt = `${promptBase}\n\nDatabase info: ${databaseDescriptor}\n\n` + - 'Determine if this message requires searching the database. Format response as:\n' + - '[NEEDS_SEARCH]\nyes/no\n[/NEEDS_SEARCH]\n\n' + - '[SEARCH_QUERY]\nkeywords if search needed\n[/SEARCH_QUERY]\n\n' + - '[RESPONSE]\nDirect response if no search needed. Use NO_RESPONSE if no response warranted.\n[/RESPONSE]'; + if (settings.alwaysRespondLLM){ + var ragPrompt = `${promptBase}\n\nDatabase info: ${databaseDescriptor}\n\n` + + 'Determine if this message requires searching the database. Format response as:\n' + + '[NEEDS_SEARCH]\nyes/no\n[/NEEDS_SEARCH]\n\n' + + '[SEARCH_QUERY]\nkeywords if search needed\n[/SEARCH_QUERY]\n\n' + + '[RESPONSE]\nDirect response if no search needed.\n[/RESPONSE]'; + } else { + var ragPrompt = `${promptBase}\n\nDatabase info: ${databaseDescriptor}\n\n` + + 'Determine if this message requires searching the database. Format response as:\n' + + '[NEEDS_SEARCH]\nyes/no\n[/NEEDS_SEARCH]\n\n' + + '[SEARCH_QUERY]\nkeywords if search needed\n[/SEARCH_QUERY]\n\n' + + '[RESPONSE]\nDirect response if no search needed. 
Use NO_RESPONSE if no response warranted.\n[/RESPONSE]';
+			}
 
 		const ragDecision = await callOllamaAPI(ragPrompt);
 		const decision = parseDecision(ragDecision);
@@ -1007,6 +1043,13 @@ async function processUserInput(userInput, data, additionalInstructions, botname
 			} else {
 				promptBase += '\n\nRespond conversationally to the current group chat message only if the message seems directed at you specifically, doing so directly and succinctly, or instead reply with NO_RESPONSE if no response is needed, followed by why no response was needed.';
 			}
+		} else if (settings.alwaysRespondLLM){
+			if (!settings.nollmcontext){
+				promptBase += '\n\nRespond conversationally to the current message, doing so directly and succinctly.';
+			} else {
+				promptBase += '\n\nRespond conversationally to the current group chat message, doing so directly and succinctly.';
+			}
+
 		} else {
 			if (!settings.nollmcontext){
 				promptBase += '\n\nRespond conversationally to the current message, if appropriate, doing so directly and succinctly, or instead reply with NO_RESPONSE to state you are choosing not to respond. Respond only with NO_RESPONSE if you have no reply.';
@@ -1020,6 +1063,9 @@ async function processUserInput(userInput, data, additionalInstructions, botname
 	if (!response || response.toLowerCase().includes('no_response') || response.toLowerCase().startsWith('no ') || response.toLowerCase().startsWith('@@@@')) {
 		console.log(response);
+		if (settings.alwaysRespondLLM && (response && !response.toLowerCase().startsWith('@@@@'))){
+			return response;
+		}
 		return false;
 	}
diff --git a/api.md b/api.md
index 3722e308..7f93842d 100644
--- a/api.md
+++ b/api.md
@@ -64,6 +64,17 @@ There is an easy to use sandbox to play with some of the common API commands and
 - [Message Types](#message-types)
 - [API Actions](#api-actions-4)
 - [Integration with Extension](#integration-with-extension)
+- [StreamDeck Integration Guide for Social Stream Ninja](#streamdeck-integration-guide-for-social-stream-ninja)
+  - [Quick Setup Method](#quick-setup-method)
+  - [Advanced Setup with Multi Actions](#advanced-setup-with-multi-actions)
+  - [Tips for StreamDeck Setup](#tips-for-streamdeck-setup)
+  - [Testing Your Setup](#testing-your-setup)
+  - [Channel-Specific Messages](#channel-specific-messages)
+- [Using Bitfocus Companion with Social Stream Ninja](#using-bitfocus-companion-with-social-stream-ninja)
+  - [Initial Setup](#initial-setup)
+  - [Available Actions](#available-actions)
+  - [Variables](#variables)
+  - [Comparison with StreamDeck](#comparison-with-streamdeck)
+  - [Scripted Example (HTTP POST)](#scripted-example-http-post)
 
 
@@ -774,3 +785,136 @@ The battle page relies on the extension for receiving data:
 1. The extension uses `sendDataP2P()` to send data to the battle page
 2. Data can be sent via WebRTC or fallback to WebSocket if available
 3. The extension can trigger game actions like starting the game
+
+# StreamDeck Integration Guide for Social Stream Ninja
+
+## Quick Setup Method
+
+1. Open the StreamDeck software
+2. Add a new "Website" action to your StreamDeck
+3. Configure the URL using this format:
+```
+https://io.socialstream.ninja/YOUR_SESSION_ID/sendEncodedChat/null/YOUR_MESSAGE
+```
+
+Replace:
+- `YOUR_SESSION_ID` with your Social Stream Ninja session ID
+- `YOUR_MESSAGE` with your URL-encoded message
+
+For example, to send "Hello Stream!":
+```
+https://io.socialstream.ninja/abc123/sendEncodedChat/null/Hello%20Stream!
+```
+
+## Advanced Setup with Multi Actions
+
+For more flexibility, you can use Multi Actions to send different messages:
+
+1. Create a new "Multi Action" on your StreamDeck
+2. Add "Website" actions for each command
+3. Use these URL patterns:
+
+**HTTP GET**
+```
+https://io.socialstream.ninja/YOUR_SESSION_ID/sendChat/null/YOUR_MESSAGE
+```
+
+**HTTPS POST**
+```
+https://io.socialstream.ninja/YOUR_SESSION_ID
+```
+With body:
+```json
+{
+  "action": "sendChat",
+  "value": "YOUR_MESSAGE",
+  "apiid": "YOUR_SESSION_ID"
+}
+```
+
+A scripted version of this POST pattern is sketched in the [Scripted Example](#scripted-example-http-post) section at the end of this document.
+
+## Tips for StreamDeck Setup
+
+- Use URL encoding for special characters in messages
+- You can create multiple buttons for different preset messages
+- Chain commands using Multi Actions for complex sequences
+- Add a delay between actions if needed using StreamDeck's delay feature
+
+## Testing Your Setup
+
+1. Find your session ID from the Social Stream API Sandbox
+2. Create a test button with a simple message
+3. Press the button to verify the message appears in your social platforms
+4. Check the Social Stream API Sandbox's incoming messages panel to confirm delivery
+
+## Channel-Specific Messages
+
+To send to specific channels, add the channel parameter:
+
+```
+https://io.socialstream.ninja/YOUR_SESSION_ID/sendChat/null/YOUR_MESSAGE?channel=2
+```
+
+Channels:
+- 1: General communication
+- 2: Dock
+- 3: Featured content
+- 4-7: Custom channels
+
+# Using Bitfocus Companion with Social Stream Ninja
+
+Bitfocus Companion turns the reasonably priced Elgato StreamDeck into a professional shotbox surface for a wide range of presentation switchers, video playback software, and broadcast equipment. It supports both Social Stream Ninja and VDO.Ninja.
+
+https://bitfocus.io/companion
+https://bitfocus.io/connections/socialstream-ninja
+
+## Initial Setup
+
+1. Enable API Control:
+   - Open Social Stream Ninja
+   - Go to `Global settings and tools` > `Mechanics`
+   - Enable `Enable remote API control of extension`
+
+2. Get Your Session ID:
+   - Navigate to `Global settings and tools` > `Session Options`
+   - Copy your Session ID
+   - Alternatively, find it in your URL after `?session=`
+
+3. Configure Companion:
+   - Install the Social Stream Ninja module in Companion
+   - Paste your Session ID into the module settings
+
+## Available Actions
+
+The following actions are available in the Companion module:
+
+- Clear featured message
+- Clear all messages
+- Next in queue
+- Toggle auto-show
+- Feature next un-featured
+- Reset Poll
+- Close Poll
+- Waitlist Controls
+- Text to Speech (TTS) Controls
+- Send Chat Message
+
+## Variables
+
+Companion can access:
+- `queue_size`: Shows the current queue size
+
+## Comparison with StreamDeck
+
+Advantages of using Companion:
+- Native integration with Social Stream Ninja
+- No need for URL encoding or complex HTTP requests
+- Direct access to all core functionality
+- Real-time queue size monitoring through variables
+- Can be used alongside StreamDeck for more complex setups
+
+This makes Companion a simpler alternative to the StreamDeck HTTP method described above, especially for basic Social Stream Ninja control.
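+
+## Scripted Example (HTTP POST)
+
+The same patterns can be driven from a short script instead of a StreamDeck button. The sketch below is illustrative only: it assumes Node 18+ for the built-in `fetch`, and `YOUR_SESSION_ID` is a placeholder for your own session ID; the endpoint, URL pattern, and JSON body mirror the Advanced Setup section above.
+
+```js
+// Minimal sketch of the HTTPS POST pattern (assumes Node 18+ global fetch).
+const sessionId = "YOUR_SESSION_ID"; // placeholder: replace with your session ID
+
+async function sendChat(message) {
+  const res = await fetch(`https://io.socialstream.ninja/${sessionId}`, {
+    method: "POST",
+    headers: { "Content-Type": "application/json" },
+    body: JSON.stringify({ action: "sendChat", value: message, apiid: sessionId }),
+  });
+  if (!res.ok) throw new Error(`Send failed: HTTP ${res.status}`);
+}
+
+// GET variant with a channel parameter (see "Channel-Specific Messages").
+async function sendChatToChannel(message, channel = 2) {
+  const url = `https://io.socialstream.ninja/${sessionId}/sendChat/null/` +
+    `${encodeURIComponent(message)}?channel=${channel}`;
+  const res = await fetch(url);
+  if (!res.ok) throw new Error(`Send failed: HTTP ${res.status}`);
+}
+
+sendChat("Hello Stream!").catch(console.error);
+```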
diff --git a/background.js b/background.js index 88f9fde1..a67afad1 100644 --- a/background.js +++ b/background.js @@ -3487,90 +3487,6 @@ chrome.runtime.onMessage.addListener(async function (request, sender, sendRespon return response; }); -// The below "levenshtein" function is based on the follow work: -// https://github.com/gustf/js-levenshtein -// MIT License - Requires preservation of copyright and license notice -// Copyright (c) 2017 Gustaf Andersson -function levenshtein(a, b) { - if (a === b) { - return 0; - } - if (a.length > b.length) { - var tmp = a; - a = b; - b = tmp; - } - var la = a.length; - var lb = b.length; - while (la > 0 && a.charCodeAt(la - 1) === b.charCodeAt(lb - 1)) { - la--; - lb--; - } - var offset = 0; - while (offset < la && a.charCodeAt(offset) === b.charCodeAt(offset)) { - offset++; - } - la -= offset; - lb -= offset; - if (la === 0 || lb < 3) { - return lb; - } - var x = 0; - var y; - var d0; - var d1; - var d2; - var d3; - var dd; - var dy; - var ay; - var bx0; - var bx1; - var bx2; - var bx3; - var vector = []; - for (y = 0; y < la; y++) { - vector.push(y + 1); - vector.push(a.charCodeAt(offset + y)); - } - var len = vector.length - 1; - for (; x < lb - 3; ) { - bx0 = b.charCodeAt(offset + (d0 = x)); - bx1 = b.charCodeAt(offset + (d1 = x + 1)); - bx2 = b.charCodeAt(offset + (d2 = x + 2)); - bx3 = b.charCodeAt(offset + (d3 = x + 3)); - dd = x += 4; - for (y = 0; y < len; y += 2) { - dy = vector[y]; - ay = vector[y + 1]; - d0 = _min(dy, d0, d1, bx0, ay); - d1 = _min(d0, d1, d2, bx1, ay); - d2 = _min(d1, d2, d3, bx2, ay); - dd = _min(d2, d3, dd, bx3, ay); - vector[y] = dd; - d3 = d2; - d2 = d1; - d1 = d0; - d0 = dy; - } - } - for (; x < lb; ) { - bx0 = b.charCodeAt(offset + (d0 = x)); - dd = ++x; - for (y = 0; y < len; y += 2) { - dy = vector[y]; - vector[y] = dd = _min(dy, d0, dd, bx0, vector[y + 1]); - d0 = dy; - } - } - return dd; -} -function _min(d0, d1, d2, bx, ay) { - return d0 < d1 || d2 < d1 ? (d0 > d2 ? d2 + 1 : d0 + 1) : bx === ay ? d1 : d1 + 1; -} -//// End of levenshtein code -//////////////////////////// - function verifyOriginalNewIncomingMessage(msg, cleaned=false) { if (Date.now() - lastSentTimestamp > 5000) { @@ -3585,9 +3501,9 @@ function verifyOriginalNewIncomingMessage(msg, cleaned=false) { msg = decodeAndCleanHtml(msg); } - var score = levenshtein(msg, lastSentMessage); + var score = fastMessageSimilarity(msg, lastSentMessage); // console.log(msg, score); - if (score < 7) { // same message + if (score > 0.5) { // same message lastMessageCounter += 1; if (lastMessageCounter>1) { @@ -3607,6 +3523,52 @@ function verifyOriginalNewIncomingMessage(msg, cleaned=false) { } +function fastMessageSimilarity(a, b) { + if (a === b) return 1; + if (!a || !b) return 0; + + const normalize = str => str + .toLowerCase() + .replace(/[\u{1F300}-\u{1F9FF}]/gu, '') + .replace(/\s+/g, '') + .trim(); + + const normA = normalize(a); + const normB = normalize(b); + + // Handle exact match after normalization + if (normA === normB) return 1; + + const maxLen = Math.max(normA.length, normB.length); + const minLen = Math.min(normA.length, normB.length); + + // Check if one is prefix of the other + const shorter = normA.length < normB.length ? normA : normB; + const longer = normA.length < normB.length ? 
normB : normA; + + // For messages > 50 chars, if one is a prefix of the other + // and covers at least 90% of the shorter message, consider it similar + if (maxLen > 50 && longer.startsWith(shorter) && minLen / maxLen > 0.9) { + return 0.95; + } + + // For very short strings + if (maxLen < 10) { + const matched = [...normA].filter(char => normB.includes(char)).length; + return matched / maxLen; + } + + // Compute similarity based on character matches for position-sensitive comparison + let matches = 0; + const compareLen = Math.min(normA.length, normB.length); + + for (let i = 0; i < compareLen; i++) { + if (normA[i] === normB[i]) matches++; + } + + return matches / maxLen; +} + function ajax(object2send, url, ajaxType = "PUT", type = "application/json; charset=utf-8") { try { if (ajaxType == "PUT" && putNode) { @@ -5586,7 +5548,7 @@ async function processIncomingRequest(request, UUID = false) { // from the dock fowardOBSCommand(request); } } else if (request.value && ("target" in request) && UUID && request.action === "chatbot"){ // target is the callback ID - if (isExtensionOn && settings.allowChatBot){ + if (isExtensionOn && settings.allowChatBot){ // private chat bot try { // ollama run technobyte/Llama-3.3-70B-Abliterated:IQ2_XS @@ -7489,7 +7451,6 @@ async function applyBotActions(data, tab = false) { console.log(e); // ai.js file missing? } } - if (settings.ollama){ try{ processMessageWithOllama(data); diff --git a/manifest.json b/manifest.json index 2149ab71..b1662d83 100644 --- a/manifest.json +++ b/manifest.json @@ -2,7 +2,7 @@ "name": "Social Stream Ninja", "description": "Powerful tooling to engage live chat on Youtube, Twitch, Zoom, and more.", "manifest_version": 3, - "version": "3.9.1", + "version": "3.9.3", "homepage_url": "http://socialstream.ninja/", "icons": { "128": "icons/icon-128.png" diff --git a/popup.html b/popup.html index 1d33c79a..4dbf5ee9 100644 --- a/popup.html +++ b/popup.html @@ -190,6 +190,13 @@ border-radius: 1px; } +@keyframes shake { + 0%, 50%, 100% { transform: rotate(0deg); } + 70% { transform: rotate(-20deg); } + 90% { transform: rotate(20deg); } +} + + #searchInput.show { display: block; width: calc(100% - 35px); /* Adjust width as needed */ @@ -201,8 +208,8 @@ right: 0px; z-index: 100; cursor: pointer; + font-size: 150%; animation: shake 2s ease-in-out; - font-size: 150%; } #activeIcon { position: absolute; @@ -211,7 +218,7 @@ cursor: pointer; display: block; height: 0; - font-size: 150%; + font-size: 150%; } .tts-test-button { display: inline-block; @@ -4832,7 +4839,7 @@
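
For reference, a quick sanity check of the new similarity gate that replaces the Levenshtein distance check. This is a minimal sketch, assuming the `fastMessageSimilarity` function from background.js above is in scope; the sample strings are illustrative:

```js
// verifyOriginalNewIncomingMessage() now treats scores above 0.5 as "same message".
const sent = "Thanks for the follow! The schedule is pinned in the about tab.";

// Identical after normalization (lowercased, whitespace and emoji stripped): score 1.
console.log(fastMessageSimilarity(sent, "thanks for the follow!  THE schedule is pinned in the about tab.")); // 1

// Unrelated text shares few characters position-for-position: score near 0.
console.log(fastMessageSimilarity(sent, "Totally different chat message here") > 0.5); // false
```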