diff --git a/docs/docs/learn/002-get-started/001-vercel-ai.mdx b/docs/docs/learn/002-get-started/001-vercel-ai.mdx
index 4211aa22..3c6d5829 100644
--- a/docs/docs/learn/002-get-started/001-vercel-ai.mdx
+++ b/docs/docs/learn/002-get-started/001-vercel-ai.mdx
@@ -147,8 +147,7 @@ Then add the following code:
 
 ```tsx
 import { openai } from '@ai-sdk/openai';
-import { generateText } from 'ai';
-import {NextResponse} from 'next/server';
+import { streamText } from 'ai';
 
 // Allow streaming responses up to 30 seconds
 export const maxDuration = 30;
@@ -156,15 +155,19 @@ export const maxDuration = 30;
 export async function POST(req: Request) {
   const { prompt } = await req.json();
 
-  const result = await generateText({
+  const result = await streamText({
     model: openai('gpt-4-turbo'),
     messages: [{
       role: 'system',
       content: prompt,
-    }]
+    }],
+    async onFinish({ text, toolCalls, toolResults, usage, finishReason }) {
+      // implement your own logic here, e.g. for storing messages
+      // or recording token usage
+    },
   });
 
-  return NextResponse.json({ reply: result.responseMessages[0].content[0].text });
+  return result.toTextStreamResponse();
 }
 ```
@@ -182,19 +185,47 @@ and provide a user message input:
 
 ```tsx
 'use client';
-import {AiChat, ChatAdapter} from '@nlux/react';
+import {AiChat, ChatAdapter, StreamingAdapterObserver} from '@nlux/react';
 import '@nlux/themes/nova.css';
 
 export default function Chat() {
-    const chatAdapter: ChatAdapter = { batchText: async (prompt: string) => {
+    const chatAdapter: ChatAdapter = {
+
+        streamText: async (prompt: string, observer: StreamingAdapterObserver) => {
         const response = await fetch('/api/chat', {
             method: 'POST',
             body: JSON.stringify({prompt: prompt}),
             headers: {'Content-Type': 'application/json'},
         });
+        if (response.status !== 200) {
+            observer.error(new Error('Failed to connect to the server'));
+            return;
+        }
+
+        if (!response.body) {
+            return;
+        }
+
+        // Read the response body as a text stream and feed
+        // the chunks to the observer as they are generated
+        const reader = response.body.getReader();
+        const textDecoder = new TextDecoder();
+
+        while (true) {
+            const { value, done } = await reader.read();
+            if (done) {
+                break;
+            }
+
+            const content = textDecoder.decode(value, {stream: true});
+            if (content) {
+                observer.next(content);
+            }
+        }
+
+        observer.complete();
+        }
+    }
 
     return (
         <AiChat
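
The `onFinish` callback in the route handler above is left as a stub. A minimal sketch of what it might contain, assuming the AI SDK reports usage as `promptTokens`/`completionTokens` (the `console.log` calls are illustrative placeholders, not part of this change):

```tsx
async onFinish({ text, usage, finishReason }) {
  // Illustrative only: replace with your own persistence logic,
  // e.g. writing the message and token counts to a database.
  console.log(`Finished (${finishReason}): ${text.length} characters generated`);
  console.log(`Prompt tokens: ${usage.promptTokens}, completion tokens: ${usage.completionTokens}`);
},
```
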
@@ -212,7 +243,7 @@ Let's take a look at what is happening in this code:
    This tells the Next.js framework that **this file is intended to run on the client-side**.
 1. We import `AiChat` from `@nlux/react` and the default theme `@nlux/themes/nova.css`.
 2. We define a `chatAdapter` object that implements the interface `ChatAdapter`.
-   It contains on method `batchText` that handles chat responses generated in a single batch.
+   It contains one method `streamText` that handles streaming chat responses.
 3. We render the `<AiChat />` component with the `chatAdapter` object.
 
 ---
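
To sanity-check the new streaming route outside the UI, a short standalone script can consume the same text stream the adapter reads. This is a hypothetical helper, not part of the change above; it assumes the Next.js dev server is running on `localhost:3000` and can be run with `npx tsx smoke-test.mts`:

```tsx
// smoke-test.mts (hypothetical helper, not part of the docs change)
const response = await fetch('http://localhost:3000/api/chat', {
    method: 'POST',
    body: JSON.stringify({prompt: 'Say hello'}),
    headers: {'Content-Type': 'application/json'},
});

if (!response.body) {
    throw new Error('No response body received');
}

const reader = response.body.getReader();
const decoder = new TextDecoder();

// Print chunks as they arrive, mirroring the adapter's read loop
while (true) {
    const {value, done} = await reader.read();
    if (done) {
        break;
    }
    process.stdout.write(decoder.decode(value, {stream: true}));
}
```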