diff --git a/src/modules/chat/chat.rb b/src/modules/chat/chat.rb index 06e0675..a0dd320 100644 --- a/src/modules/chat/chat.rb +++ b/src/modules/chat/chat.rb @@ -7,7 +7,7 @@ #=============<[ Constants ]>================# INST = "A chat between a very important human and an artificial intelligence assistant. The assistant gives quick and truthful answers to the human's questions. The assistant's responses are thorough, but succinct." -CHAT = "\n@User: Hello.\n@Wayland: Greetings.\n@User: What do you call yourself?\n@Wayland: Wayland.\n@User: What is the closest star to our sun?\n@Wayland: The closest star to our sun Sol is Alpha Centauri." +CHAT = "\nUser: Hello.\nWayland: Greetings.\nUser: What do you call yourself?\nWayland: Wayland.\nUser: What is the closest star to our sun?\nWayland: The closest star to our sun Sol is Alpha Centauri." #=============<[ Local Vars ]>================# core_config = RedFairy.new("shikigami") @@ -29,14 +29,14 @@ #=============<[ Methods ]>==================# # Parameters passed to llama.cpp running Llama 3 -def format_question(prompt) +def format_question(prompt, sender) i = rand(99) request = { "stream" => false, # keep false, breaks if true "seed" => i, # Set the random number generator (RNG) seed. "n_predict" => 500, # notes "temperature" => 0.56, # was:0, def:0-1, higher is more creative (0.49 failed to answer question about unicorns) - "stop" => ["\n@User:"], # notes + "stop" => ["\n#{sender}:", "\nUser:"], # notes "repeat_last_n" => 128, # Last n tokens to consider for penalizing repetition. 0 is disabled and -1 is ctx-size. "repeat_penalty" => 1.2, # Control the repetition of token sequences in the generated text. "top_k" => 34, # def:40, Limit the next token selection to the K most probable tokens. 
@@ -55,21 +55,21 @@ def format_question(prompt) end #def # HTTP request interface to llama.cpp server -def ask_question(q) - question = format_question(q) +def ask_question(q, s) + question = format_question(q, s) response = HTTP.post("http://localhost:4242/completion", :json => question) h = JSON.parse(response.body) return h["content"] end #def # Discord chat logic to receive msg and send response -def get_response(question) +def get_response(question, sender) @sprig.pm2_log("Received msg: #{question}") - answer = ask_question(INST + CHAT + "\n" + question + "\n@Wayland:") + answer = ask_question(INST + CHAT + "\n" + question + "\nWayland:", sender) @sprig.pm2_log("Sending msg: #{answer}") - if answer.include? "@Wayland:" - @sprig.pm2_log("@Wayland string detected, removing..") - answer.gsub!("@Wayland:", "").to_s + if answer.include? "Wayland:" + @sprig.pm2_log("Wayland string detected, removing..") + answer.gsub!("Wayland:", "").to_s end #if return answer.to_s end #def @@ -81,7 +81,7 @@ def get_response(question) loop do msg_hash = @sprig.get_msg begin - @answer = get_response(msg_hash["msg"], msg_hash["from"]) rescue Exception => e @sprig.pm2_log("Rescued job: #{e}") end #begin diff --git a/src/modules/discord/discord.rb b/src/modules/discord/discord.rb index 57af54e..13b7303 100644 --- a/src/modules/discord/discord.rb +++ b/src/modules/discord/discord.rb @@ -59,7 +59,6 @@ def respond(e) @sprig.pm2_log("Received message #{event.message}") end bot.mention(allow_role_mention: true) do |event| - @sprig.pm2_log("Responding to [#{event.message}]") respond(event) end at_exit { bot.stop }