diff --git a/src/components/Chat.tsx b/src/components/Chat.tsx
index 5728d6a..5671f2d 100644
--- a/src/components/Chat.tsx
+++ b/src/components/Chat.tsx
@@ -7,6 +7,7 @@ import { generateMessageId } from '../mcp/client'
 import type { Message } from 'ai'
 import { type Servers } from '../lib/schemas'
 import { ToolCallMessage } from './ToolCallMessage'
+import { ReasoningMessage } from './ReasoningMessage'
 import { useModel } from '../contexts/ModelContext'
 import { useUser } from '../contexts/UserContext'
 
@@ -23,6 +24,16 @@ type StreamEvent =
       arguments?: unknown
     }
   | { type: 'user'; id: string; content: string }
+  | {
+      type: 'reasoning'
+      effort: string
+      summary: string | null
+      model?: string
+      serviceTier?: string
+      temperature?: number
+      topP?: number
+      done?: boolean
+    }
 
 export function Chat() {
   const messagesEndRef = useRef(null)
@@ -75,6 +86,85 @@ export function Chat() {
           try {
             const toolState = JSON.parse(line.slice(2))
 
+            // Handle reasoning summary streaming
+            if (toolState.type === 'reasoning_summary_delta') {
+              setStreamBuffer((prev) => {
+                // Find the last reasoning message
+                const last = prev[prev.length - 1]
+                if (last && last.type === 'reasoning' && !last.done) {
+                  // Append delta to summary
+                  return [
+                    ...prev.slice(0, -1),
+                    {
+                      ...last,
+                      summary: (last.summary || '') + toolState.delta,
+                      effort: toolState.effort || last.effort,
+                      model: toolState.model || last.model,
+                      serviceTier: toolState.serviceTier || last.serviceTier,
+                      temperature: toolState.temperature ?? last.temperature,
+                      topP: toolState.topP ?? last.topP,
+                    },
+                  ]
+                } else {
+                  // Start a new reasoning message
+                  return [
+                    ...prev,
+                    {
+                      type: 'reasoning',
+                      summary: toolState.delta,
+                      effort: toolState.effort || '',
+                      model: toolState.model,
+                      serviceTier: toolState.serviceTier,
+                      temperature: toolState.temperature,
+                      topP: toolState.topP,
+                      done: false,
+                    },
+                  ]
+                }
+              })
+              return
+            }
+
+            if (toolState.type === 'reasoning_summary_done') {
+              setStreamBuffer((prev) => {
+                // Mark the last reasoning message as done
+                const last = prev[prev.length - 1]
+                if (last && last.type === 'reasoning' && !last.done) {
+                  return [
+                    ...prev.slice(0, -1),
+                    {
+                      ...last,
+                      done: true,
+                      effort: toolState.effort || last.effort,
+                      model: toolState.model || last.model,
+                      serviceTier: toolState.serviceTier || last.serviceTier,
+                      temperature: toolState.temperature ?? last.temperature,
+                      topP: toolState.topP ?? last.topP,
+                    },
+                  ]
+                }
+                return prev
+              })
+              return
+            }
+
+            if (toolState.type === 'reasoning') {
+              setStreamBuffer((prev) => [
+                ...prev,
+                {
+                  type: 'reasoning',
+                  effort: toolState.effort,
+                  summary: toolState.summary,
+                  model: toolState.model,
+                  serviceTier: toolState.serviceTier,
+                  temperature: toolState.temperature,
+                  topP: toolState.topP,
+                },
+              ])
+              return
+            }
+
+            // Tool call fallback (for other tool types)
             if ('delta' in toolState) {
               try {
                 toolState.delta =
@@ -214,6 +304,19 @@ export function Chat() {
                 args={event}
               />
             )
+          } else if ('type' in event && event.type === 'reasoning') {
+            return (
+              <ReasoningMessage
+                effort={event.effort}
+                summary={event.summary}
+                model={event.model}
+                serviceTier={event.serviceTier}
+                temperature={event.temperature}
+                topP={event.topP}
+                isLoading={!event.done}
+              />
+            )
           } else if ('type' in event && event.type === 'assistant') {
             const assistantEvent = event as Extract<
               StreamEvent,
diff --git a/src/components/ChatMessage.tsx b/src/components/ChatMessage.tsx
index db7cfa4..dd221bb 100644
--- a/src/components/ChatMessage.tsx
+++ b/src/components/ChatMessage.tsx
@@ -2,7 +2,7 @@ import { cn } from '../lib/utils'
 import type { Message } from '../mcp/client'
 import { formatTimestamp } from '../lib/utils'
 import { Bot, User, CheckCircle2, Clock, AlertCircle } from 'lucide-react'
-import ReactMarkdown from 'react-markdown'
+import { MarkdownContent } from './MarkdownContent'
 
 type ChatMessageProps = {
   message: Message
@@ -49,35 +49,7 @@ export function ChatMessage({ message, isLoading }: ChatMessageProps) {
         )}
       >
-            <ReactMarkdown
-              components={{
-                pre: ({ node, ...props }) => (
-                  <pre {...props} />
-                ),
-                code: ({ node, ...props }) => (
-                  <code {...props} />
-                ),
-                a: ({ href, children, ...props }) => (
-                  <a href={href} target="_blank" rel="noopener noreferrer" {...props}>
-                    {children}
-                  </a>
-                ),
-              }}
-            >
-              {message.content}
-            </ReactMarkdown>
+            <MarkdownContent content={message.content} />
diff --git a/src/components/MarkdownContent.tsx b/src/components/MarkdownContent.tsx
new file mode 100644
index 0000000..53465e9
--- /dev/null
+++ b/src/components/MarkdownContent.tsx
@@ -0,0 +1,41 @@
+import ReactMarkdown from 'react-markdown'
+
+type MarkdownContentProps = {
+  content: string
+}
+
+export function MarkdownContent({ content }: MarkdownContentProps) {
+  return (
+    <div>
+      <ReactMarkdown
+        components={{
+          pre: ({ node, ...props }) => (
+            <pre {...props} />
+          ),
+          code: ({ node, ...props }) => (
+            <code {...props} />
+          ),
+          a: ({ href, children, ...props }) => (
+            <a href={href} target="_blank" rel="noopener noreferrer" {...props}>
+              {children}
+            </a>
+          ),
+        }}
+      >
+        {content}
+      </ReactMarkdown>
+    </div>
+  )
+}
diff --git a/src/components/ReasoningMessage.tsx b/src/components/ReasoningMessage.tsx
new file mode 100644
index 0000000..9c1c989
--- /dev/null
+++ b/src/components/ReasoningMessage.tsx
@@ -0,0 +1,58 @@
+import { Brain } from 'lucide-react'
+import { cn } from '../lib/utils'
+import { MarkdownContent } from './MarkdownContent'
+
+type ReasoningMessageProps = {
+  effort: string
+  summary: string | null
+  model?: string
+  serviceTier?: string
+  temperature?: number
+  topP?: number
+  isLoading?: boolean
+}
+
+export function ReasoningMessage({
+  effort,
+  summary,
+  model,
+  serviceTier,
+  temperature,
+  topP,
+  isLoading,
+}: ReasoningMessageProps) {
+  return (
+    <div className={cn(isLoading && 'animate-pulse')}>
+      <div>
+        <Brain />
+      </div>
+      <div>
+        <div>
+          <span>Reasoning</span>
+        </div>
+        <div>
+          {effort && <div>Effort: {effort}</div>}
+          {summary && (
+            <MarkdownContent content={summary} />
+          )}
+          {model && <div>Model: {model}</div>}
+          {serviceTier && <div>Service Tier: {serviceTier}</div>}
+          <div>
+            {temperature !== undefined && (
+              <div>Temperature: {temperature}</div>
+            )}
+            {topP !== undefined && <div>Top P: {topP}</div>}
+          </div>
+        </div>
+      </div>
+    </div>
+  )
+}
diff --git a/src/lib/streaming.ts b/src/lib/streaming.ts
index 3e76e15..ffa7491 100644
--- a/src/lib/streaming.ts
+++ b/src/lib/streaming.ts
@@ -75,6 +75,48 @@ export function streamText(
           }
         }
         break
+
+      case 'response.content_part.added':
+      case 'response.content_part.done':
+        if (chunk.part?.type === 'output_text' && chunk.part.text) {
+          buffer += chunk.part.text
+          flush()
+        }
+        break
+
+      case 'response.reasoning.delta':
+        if (typeof chunk.delta === 'string') {
+          controller.enqueue(
+            encoder.encode(
+              `t:${JSON.stringify({
+                type: 'reasoning',
+                effort: chunk.effort,
+                summary: chunk.delta,
+                model: chunk.model,
+                serviceTier: chunk.service_tier,
+                temperature: chunk.temperature,
+                topP: chunk.top_p,
+              })}\n`,
+            ),
+          )
+        }
+        break
+
+      case 'response.created':
+      case 'response.in_progress':
+        if (chunk.response?.reasoning) {
+          controller.enqueue(
+            encoder.encode(
+              `t:${JSON.stringify({
+                type: 'reasoning',
+                effort: chunk.response.reasoning.effort,
+                summary: chunk.response.reasoning.summary,
+              })}\n`,
+            ),
+          )
+        }
+        break
 
       case 'response.mcp_call.failed':
         console.error('[TOOL CALL FAILED]', chunk)
@@ -163,6 +205,39 @@ export function streamText(
        }
         break
 
+      case 'response.reasoning_summary_text.delta':
+        if (typeof chunk.delta === 'string') {
+          controller.enqueue(
+            encoder.encode(
+              `t:${JSON.stringify({
+                type: 'reasoning_summary_delta',
+                delta: chunk.delta,
+                effort: chunk.effort,
+                model: chunk.model,
+                serviceTier: chunk.service_tier,
+                temperature: chunk.temperature,
+                topP: chunk.top_p,
+              })}\n`,
+            ),
+          )
+        }
+        break
+
+      case 'response.reasoning_summary_text.done':
+        controller.enqueue(
+          encoder.encode(
+            `t:${JSON.stringify({
+              type: 'reasoning_summary_done',
+              effort: chunk.effort,
+              model: chunk.model,
+              serviceTier: chunk.service_tier,
+              temperature: chunk.temperature,
+              topP: chunk.top_p,
+            })}\n`,
+          ),
+        )
+        break
+
       default:
         break
     }
diff --git a/src/routes/api/chat.ts b/src/routes/api/chat.ts
index a2d935f..a2a5082 100644
--- a/src/routes/api/chat.ts
+++ b/src/routes/api/chat.ts
@@ -88,6 +88,13 @@ export const ServerRoute = createServerFileRoute('/api/chat').methods({
       input,
       stream: true,
       user: userId,
+      ...(model.startsWith('o3') || model.startsWith('o4')
+        ? {
+            reasoning: {
+              summary: 'detailed',
+            },
+          }
+        : {}),
     })
 
     return streamText(answer)
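Note on the wire format used by both sides of this patch: streamText() frames every non-text event as a `t:`-prefixed JSON line, and Chat.tsx recovers each event with JSON.parse(line.slice(2)). A minimal client-side reader for that framing is sketched below, assuming only the event shapes serialized in this diff; readEvents and StreamLineEvent are illustrative names, not part of the patch.

// Sketch: consume `t:{json}\n` frames from a streamed response body.
// StreamLineEvent mirrors the payloads emitted by streamText() above;
// the helper name and exact shape are assumptions for illustration.
type StreamLineEvent = {
  type: 'reasoning' | 'reasoning_summary_delta' | 'reasoning_summary_done'
  delta?: string
  effort?: string
  summary?: string | null
  model?: string
  serviceTier?: string
  temperature?: number
  topP?: number
}

async function* readEvents(
  body: ReadableStream<Uint8Array>,
): AsyncGenerator<StreamLineEvent> {
  const reader = body.getReader()
  const decoder = new TextDecoder()
  let buffered = ''
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    buffered += decoder.decode(value, { stream: true })
    const lines = buffered.split('\n')
    buffered = lines.pop() ?? '' // keep any partial trailing line
    for (const line of lines) {
      if (line.startsWith('t:')) {
        // Same framing Chat.tsx parses with JSON.parse(line.slice(2))
        yield JSON.parse(line.slice(2)) as StreamLineEvent
      }
    }
  }
}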