Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fixes #149: Add Temperature and Max Tokens Configuration #176

Open
wants to merge 6 commits into
base: staging
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,8 @@
"@radix-ui/react-hover-card": "^1.1.2",
"@radix-ui/react-icons": "^1.3.0",
"@radix-ui/react-label": "^2.1.0",
"@radix-ui/react-navigation-menu": "^1.2.1",
"@radix-ui/react-popover": "^1.1.2",
"@radix-ui/react-progress": "^1.1.0",
"@radix-ui/react-select": "^2.1.1",
"@radix-ui/react-slider": "^1.2.1",
Expand All @@ -60,6 +62,7 @@
"@vercel/kv": "^2.0.0",
"class-variance-authority": "^0.7.0",
"clsx": "^2.1.1",
"cmdk": "1.0.0",
"date-fns": "^4.1.0",
"dotenv": "^16.4.5",
"eslint-plugin-unused-imports": "^4.1.4",
Expand Down
6 changes: 4 additions & 2 deletions src/agent/open-canvas/nodes/customAction.ts
Original file line number Diff line number Diff line change
Expand Up @@ -39,11 +39,13 @@ export const customAction = async (
throw new Error("No custom quick action ID found.");
}

const { modelName, modelProvider } =
const { modelName, modelProvider, modelConfig } =
getModelNameAndProviderFromConfig(config);
const smallModel = await initChatModel(modelName, {
temperature: 0.5,
modelProvider,
// temperature: 0.5,
temperature: modelConfig.temperatureRange.current,
maxTokens: modelConfig.maxTokens.current,
});

const store = ensureStoreInConfig(config);
Expand Down
2 changes: 1 addition & 1 deletion src/agent/open-canvas/nodes/generate-artifact/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ export const generateArtifact = async (
config: LangGraphRunnableConfig
): Promise<OpenCanvasGraphReturnType> => {
const { modelName } = getModelNameAndProviderFromConfig(config);
const smallModel = await getModelFromConfig(config, 0.5);
const smallModel = await getModelFromConfig(config);

const modelWithArtifactTool = smallModel.bindTools(
[
Expand Down
11 changes: 2 additions & 9 deletions src/agent/open-canvas/nodes/generatePath.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,9 @@ import { OpenCanvasGraphAnnotation } from "../state";
import { z } from "zod";
import {
formatArtifactContentWithTemplate,
getModelNameAndProviderFromConfig,
getModelFromConfig,
} from "../../utils";
import { getArtifactContent } from "../../../contexts/utils";
import { initChatModel } from "langchain/chat_models/universal";
import { LangGraphRunnableConfig } from "@langchain/langgraph";

/**
Expand All @@ -22,7 +21,6 @@ export const generatePath = async (
state: typeof OpenCanvasGraphAnnotation.State,
config: LangGraphRunnableConfig
) => {
console.log("config.configurable!!", config.configurable);
if (state.highlightedCode) {
return {
next: "updateArtifact",
Expand Down Expand Up @@ -94,12 +92,7 @@ export const generatePath = async (
? "rewriteArtifact"
: "generateArtifact";

const { modelName, modelProvider } =
getModelNameAndProviderFromConfig(config);
const model = await initChatModel(modelName, {
temperature: 0,
modelProvider,
});
const model = await getModelFromConfig(config);
const modelWithTool = model.withStructuredOutput(
z.object({
route: z
Expand Down
6 changes: 4 additions & 2 deletions src/agent/open-canvas/nodes/replyToGeneralInput.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,13 @@ export const replyToGeneralInput = async (
state: typeof OpenCanvasGraphAnnotation.State,
config: LangGraphRunnableConfig
): Promise<OpenCanvasGraphReturnType> => {
const { modelName, modelProvider } =
const { modelName, modelProvider, modelConfig } =
getModelNameAndProviderFromConfig(config);
const smallModel = await initChatModel(modelName, {
temperature: 0.5,
modelProvider,
// temperature: 0.5,
temperature: modelConfig.temperatureRange.current,
maxTokens: modelConfig.maxTokens.current,
});

const prompt = `You are an AI assistant tasked with responding to the users question.
Expand Down
6 changes: 4 additions & 2 deletions src/agent/open-canvas/nodes/rewriteArtifactTheme.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,13 @@ export const rewriteArtifactTheme = async (
state: typeof OpenCanvasGraphAnnotation.State,
config: LangGraphRunnableConfig
): Promise<OpenCanvasGraphReturnType> => {
const { modelName, modelProvider } =
const { modelName, modelProvider, modelConfig } =
getModelNameAndProviderFromConfig(config);
const smallModel = await initChatModel(modelName, {
temperature: 0.5,
modelProvider,
// temperature: 0.5,
temperature: modelConfig.temperatureRange.current,
maxTokens: modelConfig.maxTokens.current,
});

const store = ensureStoreInConfig(config);
Expand Down
6 changes: 4 additions & 2 deletions src/agent/open-canvas/nodes/rewriteCodeArtifactTheme.ts
Original file line number Diff line number Diff line change
Expand Up @@ -16,11 +16,13 @@ export const rewriteCodeArtifactTheme = async (
state: typeof OpenCanvasGraphAnnotation.State,
config: LangGraphRunnableConfig
): Promise<OpenCanvasGraphReturnType> => {
const { modelName, modelProvider } =
const { modelName, modelProvider, modelConfig } =
getModelNameAndProviderFromConfig(config);
const smallModel = await initChatModel(modelName, {
temperature: 0.5,
modelProvider,
// temperature: 0.5,
temperature: modelConfig.temperatureRange.current,
maxTokens: modelConfig.maxTokens.current,
});

const currentArtifactContent = state.artifact
Expand Down
17 changes: 12 additions & 5 deletions src/agent/open-canvas/nodes/updateArtifact.ts
Original file line number Diff line number Diff line change
@@ -1,11 +1,15 @@
import { ChatOpenAI } from "@langchain/openai";
import { OpenCanvasGraphAnnotation, OpenCanvasGraphReturnType } from "../state";
import { UPDATE_HIGHLIGHTED_ARTIFACT_PROMPT } from "../prompts";
import { ensureStoreInConfig, formatReflections } from "../../utils";
import { ArtifactCodeV3, ArtifactV3, Reflections } from "../../../types";
import { LangGraphRunnableConfig } from "@langchain/langgraph";
import { ChatOpenAI } from "@langchain/openai";
import { getArtifactContent } from "../../../contexts/utils";
import { isArtifactCodeContent } from "../../../lib/artifact_content_types";
import { ArtifactCodeV3, ArtifactV3, Reflections } from "../../../types";
import {
ensureStoreInConfig,
formatReflections,
getModelNameAndProviderFromConfig,
} from "../../utils";
import { UPDATE_HIGHLIGHTED_ARTIFACT_PROMPT } from "../prompts";
import { OpenCanvasGraphAnnotation, OpenCanvasGraphReturnType } from "../state";

/**
* Update an existing artifact based on the user's query.
Expand All @@ -14,9 +18,12 @@ export const updateArtifact = async (
state: typeof OpenCanvasGraphAnnotation.State,
config: LangGraphRunnableConfig
): Promise<OpenCanvasGraphReturnType> => {
const { modelConfig } = getModelNameAndProviderFromConfig(config);
const smallModel = new ChatOpenAI({
model: "gpt-4o",
temperature: 0,
// temperature: modelConfig.temperatureRange.current,
maxTokens: modelConfig.maxTokens.current,
});

const store = ensureStoreInConfig(config);
Expand Down
12 changes: 9 additions & 3 deletions src/agent/open-canvas/nodes/updateHighlightedText.ts
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
import { getModelNameAndProviderFromConfig } from "@/agent/utils";
import { LangGraphRunnableConfig } from "@langchain/langgraph";
import { ChatOpenAI } from "@langchain/openai";
import { OpenCanvasGraphAnnotation, OpenCanvasGraphReturnType } from "../state";
import { ArtifactMarkdownV3 } from "../../../types";
import { getArtifactContent } from "../../../contexts/utils";
import { isArtifactMarkdownContent } from "../../../lib/artifact_content_types";
import { ArtifactMarkdownV3 } from "../../../types";
import { OpenCanvasGraphAnnotation, OpenCanvasGraphReturnType } from "../state";

const PROMPT = `You are an expert AI writing assistant, tasked with rewriting some text a user has selected. The selected text is nested inside a larger 'block'. You should always respond with ONLY the updated text block in accordance with the user's request.
You should always respond with the full markdown text block, as it will simply replace the existing block in the artifact.
Expand All @@ -27,11 +29,15 @@ Ensure you reply with the FULL text block, including the updated selected text.
* Update an existing artifact based on the user's query.
*/
export const updateHighlightedText = async (
state: typeof OpenCanvasGraphAnnotation.State
state: typeof OpenCanvasGraphAnnotation.State,
config: LangGraphRunnableConfig
): Promise<OpenCanvasGraphReturnType> => {
const { modelConfig } = getModelNameAndProviderFromConfig(config);
const model = new ChatOpenAI({
model: "gpt-4o",
temperature: 0,
// temperature: modelConfig.temperatureRange.current,
maxTokens: modelConfig.maxTokens.current,
}).withConfig({ runName: "update_highlighted_markdown" });

const currentArtifactContent = state.artifact
Expand Down
27 changes: 20 additions & 7 deletions src/agent/utils.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import { isArtifactCodeContent } from "@/lib/artifact_content_types";
import { CustomModelConfig } from "@/types";
import { BaseStore, LangGraphRunnableConfig } from "@langchain/langgraph";
import { ArtifactCodeV3, ArtifactMarkdownV3, Reflections } from "../types";
import { initChatModel } from "langchain/chat_models/universal";
Expand Down Expand Up @@ -133,33 +134,47 @@ export const formatArtifactContentWithTemplate = (

export const getModelNameAndProviderFromConfig = (
config: LangGraphRunnableConfig
): { modelName: string; modelProvider: string } => {
): {
modelName: string;
modelProvider: string;
modelConfig: CustomModelConfig;
} => {
const customModelName = config.configurable?.customModelName as string;
if (!customModelName) {
throw new Error("Model name is missing in config.");
}

const modelConfig = config.configurable?.modelConfig as CustomModelConfig;
if (!modelConfig) {
throw new Error("Custom Model config is missing in config.");
}

if (customModelName.includes("gpt-")) {
return {
modelName: customModelName,
modelProvider: "openai",
modelConfig,
};
}
if (customModelName.includes("claude-")) {
return {
modelName: customModelName,
modelProvider: "anthropic",
modelConfig,
};
}
if (customModelName.includes("fireworks/")) {
return {
modelName: customModelName,
modelProvider: "fireworks",
modelConfig,
};
}
if (customModelName.includes("gemini-")) {
return {
modelName: customModelName,
modelProvider: "google-genai",
modelConfig,
};
}

Expand All @@ -172,14 +187,12 @@ export function optionallyGetSystemPromptFromConfig(
return config.configurable?.systemPrompt as string | undefined;
}

export async function getModelFromConfig(
config: LangGraphRunnableConfig,
temperature = 0
) {
const { modelName, modelProvider } =
export async function getModelFromConfig(config: LangGraphRunnableConfig) {
const { modelName, modelProvider, modelConfig } =
getModelNameAndProviderFromConfig(config);
return await initChatModel(modelName, {
temperature,
modelProvider,
temperature: modelConfig.temperatureRange.current,
maxTokens: modelConfig.maxTokens.current,
});
}
35 changes: 26 additions & 9 deletions src/components/canvas/canvas.tsx
Original file line number Diff line number Diff line change
@@ -1,25 +1,30 @@
"use client";

import { ArtifactRenderer } from "@/components/artifacts/ArtifactRenderer";
import { ContentComposerChatInterface } from "./content-composer";
import { ALL_MODEL_NAMES } from "@/constants";
import {
ALL_MODEL_NAMES,
DEFAULT_MODEL_CONFIG,
DEFAULT_MODEL_NAME,
} from "@/constants";
import { useGraphContext } from "@/contexts/GraphContext";
import { useToast } from "@/hooks/use-toast";
import { getLanguageTemplate } from "@/lib/get_language_template";
import { cn } from "@/lib/utils";
import {
ArtifactCodeV3,
ArtifactMarkdownV3,
ArtifactV3,
CustomModelConfig,
ProgrammingLanguageOptions,
} from "@/types";
import { useEffect, useState } from "react";
import { useGraphContext } from "@/contexts/GraphContext";
import React from "react";
import React, { useEffect, useState } from "react";
import { ContentComposerChatInterface } from "./content-composer";

export function CanvasComponent() {
const { threadData, graphData, userData } = useGraphContext();
const { user } = userData;
const { threadId, clearThreadsWithNoValues, setModelName } = threadData;
const { threadId, clearThreadsWithNoValues, setModelName, setModelConfig } =
threadData;
const { setArtifact } = graphData;
const { toast } = useToast();
const [chatStarted, setChatStarted] = useState(false);
Expand Down Expand Up @@ -88,9 +93,21 @@ export function CanvasComponent() {
// Chat should only be "started" if there are messages present
if ((thread.values as Record<string, any>)?.messages?.length) {
setChatStarted(true);
setModelName(
thread?.metadata?.customModelName as ALL_MODEL_NAMES
);
if (thread?.metadata?.customModelName) {
setModelName(
thread.metadata.customModelName as ALL_MODEL_NAMES
);
} else {
setModelName(DEFAULT_MODEL_NAME);
}

if (thread?.metadata?.modelConfig) {
setModelConfig(
thread.metadata.modelConfig as CustomModelConfig
);
} else {
setModelConfig(DEFAULT_MODEL_CONFIG);
}
} else {
setChatStarted(false);
}
Expand Down
13 changes: 13 additions & 0 deletions src/components/chat-interface/model-selector/constants.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
import {
ANTHROPIC_MODELS,
OPENAI_MODELS,
FIREWORKS_MODELS,
GEMINI_MODELS,
} from "@/constants";

/**
 * Flat list of every selectable chat model, aggregated across all
 * supported providers (Anthropic, OpenAI, Fireworks, Gemini) in that order.
 */
export const ALL_MODELS = [
  ANTHROPIC_MODELS,
  OPENAI_MODELS,
  FIREWORKS_MODELS,
  GEMINI_MODELS,
].flat();
Loading