diff --git a/core/llm/index.ts b/core/llm/index.ts
index 0b48d867c7..11b66ce8d3 100644
--- a/core/llm/index.ts
+++ b/core/llm/index.ts
@@ -25,6 +25,7 @@
 import { logDevData } from "../util/devdata.js";
 import { DevDataSqliteDb } from "../util/devdataSqlite.js";
 import mergeJson from "../util/merge.js";
 import { renderChatMessage } from "../util/messageContent.js";
+import { isOllamaInstalled } from "../util/ollamaHelper.js";
 import { Telemetry } from "../util/posthog.js";
 import { withExponentialBackoff } from "../util/withExponentialBackoff.js";
@@ -55,7 +56,6 @@ import {
   toCompleteBody,
   toFimBody,
 } from "./openaiTypeConverters.js";
-import { isOllamaInstalled } from "../util/ollamaHelper.js";
 
 export abstract class BaseLLM implements ILLM {
   static providerName: string;
@@ -415,10 +415,9 @@ export abstract class BaseLLM implements ILLM {
         e.code === "ECONNREFUSED" &&
         e.message.includes("http://127.0.0.1:11434")
       ) {
-        const message = (await isOllamaInstalled()) ?
-          "Unable to connect to local Ollama instance. Ollama may not be running." :
-          "Unable to connect to local Ollama instance. Ollama may not be installed or may not running."
-        ;
+        const message = (await isOllamaInstalled())
+          ? "Unable to connect to local Ollama instance. Ollama may not be running."
+          : "Unable to connect to local Ollama instance. Ollama may not be installed or may not be running.";
         throw new Error(message);
       }
     }
@@ -693,13 +692,28 @@ export abstract class BaseLLM implements ILLM {
     return body;
   }
 
+  private _modifyCompletionOptions(
+    completionOptions: CompletionOptions,
+  ): CompletionOptions {
+    // As of 01/14/25, streaming is not available with o1.
+    // See these threads:
+    // - https://github.com/continuedev/continue/issues/3698
+    // - https://community.openai.com/t/streaming-support-for-o1-o1-2024-12-17-resulting-in-400-unsupported-value/1085043
+    if (completionOptions.model === "o1") {
+      completionOptions.stream = false;
+    }
+
+    return completionOptions;
+  }
+
   async *streamChat(
     _messages: ChatMessage[],
     signal: AbortSignal,
     options: LLMFullCompletionOptions = {},
   ): AsyncGenerator {
-    const { completionOptions, log, raw } =
-      this._parseCompletionOptions(options);
+    let { completionOptions, log } = this._parseCompletionOptions(options);
+
+    completionOptions = this._modifyCompletionOptions(completionOptions);
 
     const messages = this._compileChatMessages(completionOptions, _messages);
 
diff --git a/gui/src/hooks/useSetup.ts b/gui/src/hooks/useSetup.ts
index ee8ced33f1..a545aedafb 100644
--- a/gui/src/hooks/useSetup.ts
+++ b/gui/src/hooks/useSetup.ts
@@ -65,7 +65,6 @@ function useSetup() {
       if (result.status === "error") {
         return;
       }
-      console.log("Config loaded", result.content);
       await handleConfigUpdate(initial, result.content);
     },
     [ideMessenger, handleConfigUpdate],