Commit: Add Temperature
Oceania2018 committed Dec 1, 2023
1 parent 9cc9839 commit 9d44878
Showing 2 changed files with 25 additions and 18 deletions.
@@ -1,14 +1,12 @@
 using BotSharp.Abstraction.Agents;
 using BotSharp.Abstraction.Agents.Enums;
 using BotSharp.Abstraction.Conversations;
 using BotSharp.Abstraction.Loggers;
 using BotSharp.Abstraction.Functions.Models;
 using BotSharp.Abstraction.Routing;
 using BotSharp.Plugin.GoogleAI.Settings;
 using LLMSharp.Google.Palm;
 using Microsoft.Extensions.Logging;
-using System.Diagnostics.Metrics;
-using static System.Net.Mime.MediaTypeNames;
 using LLMSharp.Google.Palm.DiscussService;

 namespace BotSharp.Plugin.GoogleAI.Providers;

@@ -39,19 +37,25 @@ public RoleDialogModel GetChatCompletions(Agent agent, List<RoleDialogModel> conversations)

         var client = new GooglePalmClient(apiKey: _settings.PaLM.ApiKey);

-        var (prompt, messages) = PrepareOptions(agent, conversations);
+        var (prompt, messages, hasFunctions) = PrepareOptions(agent, conversations);

         RoleDialogModel msg;

-        if (messages == null)
+        if (hasFunctions)
         {
-            // use text completion
-            var response = client.GenerateTextAsync(prompt, null).Result;
+            // var response = client.GenerateTextAsync(prompt, null).Result;
+            var response = client.ChatAsync(new PalmChatCompletionRequest
+            {
+                Context = prompt,
+                Messages = messages,
+                Temperature = 0.1f
+            }).Result;

             var message = response.Candidates.First();

             // check if returns function calling
-            var llmResponse = message.Output.JsonContent<FunctionCallingResponse>();
+            var llmResponse = message.Content.JsonContent<FunctionCallingResponse>();

             msg = new RoleDialogModel(llmResponse.Role, llmResponse.Content)
             {
@@ -79,13 +83,14 @@ public RoleDialogModel GetChatCompletions(Agent agent, List<RoleDialogModel> conversations)
         Task.WaitAll(hooks.Select(hook =>
             hook.AfterGenerated(msg, new TokenStatsModel
             {
+                Prompt = prompt,
                 Model = _model
             })).ToArray());

         return msg;
     }

-    private (string, List<PalmChatMessage>) PrepareOptions(Agent agent, List<RoleDialogModel> conversations)
+    private (string, List<PalmChatMessage>, bool) PrepareOptions(Agent agent, List<RoleDialogModel> conversations)
     {
         var prompt = "";

@@ -99,6 +104,9 @@ public RoleDialogModel GetChatCompletions(Agent agent, List<RoleDialogModel> conversations)
         var routing = _services.GetRequiredService<IRoutingService>();
         var router = routing.Router;

+        var messages = conversations.Select(c => new PalmChatMessage(c.Content, c.Role == AgentRole.User ? "user" : "AI"))
+            .ToList();
+
         if (agent.Functions != null && agent.Functions.Count > 0)
         {
             prompt += "\r\n\r\n[Functions] defined in JSON Schema:\r\n";
@@ -118,13 +126,13 @@ public RoleDialogModel GetChatCompletions(Agent agent, List<RoleDialogModel> conversations)

             prompt += "\r\n\r\n" + router.Templates.FirstOrDefault(x => x.Name == "response_with_function").Content;

-            return (prompt, null);
+            return (prompt, new List<PalmChatMessage>
+            {
+                new PalmChatMessage("Which function should be used for the next step based on latest user or function response, output your response in JSON:", AgentRole.User),
+            }, true);
         }

-        var messages = conversations.Select(c => new PalmChatMessage(c.Content, c.Role == AgentRole.User ? "user" : "AI"))
-            .ToList();
-
-        return (prompt, messages);
+        return (prompt, messages, false);
     }

     public Task<bool> GetChatCompletionsAsync(Agent agent, List<RoleDialogModel> conversations, Func<RoleDialogModel, Task> onMessageReceived, Func<RoleDialogModel, Task> onFunctionExecuting)
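For context, here is a minimal standalone sketch of the call pattern this commit switches to. It uses only the LLMSharp.Google.Palm types visible in the diff (GooglePalmClient, PalmChatCompletionRequest, PalmChatMessage); the API key, prompt text, and console output are placeholders, not BotSharp code:

    using System;
    using System.Collections.Generic;
    using System.Linq;
    using LLMSharp.Google.Palm;
    using LLMSharp.Google.Palm.DiscussService;

    // Placeholder key; the provider reads its key from _settings.PaLM.ApiKey.
    var client = new GooglePalmClient(apiKey: "YOUR_API_KEY");

    // Routing instructions travel in Context; the dialog goes in Messages.
    var response = await client.ChatAsync(new PalmChatCompletionRequest
    {
        Context = "[Functions] defined in JSON Schema: ...",  // prompt assembled by PrepareOptions
        Messages = new List<PalmChatMessage>
        {
            new PalmChatMessage("Which function should be used for the next step based on latest user or function response, output your response in JSON:", "user"),
        },
        // A low temperature keeps the sampled JSON nearly deterministic,
        // which matters because the caller parses it into FunctionCallingResponse.
        Temperature = 0.1f
    });

    Console.WriteLine(response.Candidates.First().Content);

The substance of the change: the function-routing branch now goes through the chat endpoint with the conversation passed as structured messages and a pinned low temperature, instead of a single flattened text-completion prompt.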
@@ -1,10 +1,9 @@
 [Output Requirements]
 1. Read the [Functions] definition, you can utilize the function to retrieve data or execute actions.
-2. Think step by step, check if specific function will provider data to help complete user request based on the [Conversation].
+2. Think step by step, check if specific function will provider data to help complete user request based on the conversation.
 3. If you need to call a function to decide how to response user,
 response in format: {"role": "function", "reason":"why choose this function", "function_name": "", "args": {}},
 otherwise response in format: {"role": "assistant", "reason":"why response to user", "content":"next step question"}.
-4. If the [Conversation] already contains the function execution result, don't need to call it again.
+4. If the conversation already contains the function execution result, don't need to call it again.
 5. If user mentioned some specific requirment, don't ask this question in your response.
-
-Which function should be used for the next step based on latest user's response, output your response in JSON:
+6. Don't repeat the same question in your response.
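This template pins the model to one of two JSON shapes, which the provider then deserializes via message.Content.JsonContent<FunctionCallingResponse>(). As an illustration only (the real FunctionCallingResponse lives in BotSharp.Abstraction.Functions.Models and its members may differ), a DTO matching the template's contract could look like this:

    using System.Text.Json;
    using System.Text.Json.Serialization;

    // Hypothetical DTO mirroring the two response shapes the template allows;
    // only Role and Content are confirmed by their use in the provider code above.
    public class FunctionCallingResponse
    {
        [JsonPropertyName("role")]
        public string Role { get; set; }          // "function" or "assistant"

        [JsonPropertyName("reason")]
        public string Reason { get; set; }        // model's stated rationale

        [JsonPropertyName("function_name")]
        public string FunctionName { get; set; }  // populated when role == "function"

        [JsonPropertyName("args")]
        public JsonElement? Args { get; set; }    // free-form function arguments

        [JsonPropertyName("content")]
        public string Content { get; set; }       // populated when role == "assistant"
    }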
