-
Notifications
You must be signed in to change notification settings - Fork 340
/
route.ts
80 lines (66 loc) · 2.39 KB
/
route.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
import { NextRequest, NextResponse } from "next/server";
import { z } from "zod";
import { ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
// Next.js route-segment config: serve this handler from the Edge runtime.
export const runtime = "edge";
// Prompt template for the extraction chain; {input} is substituted with the
// latest user message in the POST handler below.
const TEMPLATE = `Extract the requested fields from the input.
The field "entity" refers to the first mentioned entity in the input.
Input:
{input}`;
/**
* This handler initializes and calls an OpenAI Functions powered
* structured output chain. See the docs for more information:
*
* https://js.langchain.com/v0.2/docs/how_to/structured_output
*/
export async function POST(req: NextRequest) {
try {
const body = await req.json();
const messages = body.messages ?? [];
const currentMessageContent = messages[messages.length - 1].content;
const prompt = PromptTemplate.fromTemplate(TEMPLATE);
/**
* Function calling is currently only supported with ChatOpenAI models
*/
const model = new ChatOpenAI({
temperature: 0.8,
model: "gpt-4o-mini",
});
/**
* We use Zod (https://zod.dev) to define our schema for convenience,
* but you can pass JSON schema if desired.
*/
const schema = z
.object({
tone: z
.enum(["positive", "negative", "neutral"])
.describe("The overall tone of the input"),
entity: z.string().describe("The entity mentioned in the input"),
word_count: z.number().describe("The number of words in the input"),
chat_response: z.string().describe("A response to the human's input"),
final_punctuation: z
.optional(z.string())
.describe("The final punctuation mark in the input, if any."),
})
.describe("Should always be used to properly format output");
/**
* Bind schema to the OpenAI model.
* Future invocations of the returned model will always match the schema.
*
* Under the hood, uses tool calling by default.
*/
const functionCallingModel = model.withStructuredOutput(schema, {
name: "output_formatter",
});
/**
* Returns a chain with the function calling model.
*/
const chain = prompt.pipe(functionCallingModel);
const result = await chain.invoke({
input: currentMessageContent,
});
return NextResponse.json(result, { status: 200 });
} catch (e: any) {
return NextResponse.json({ error: e.message }, { status: e.status ?? 500 });
}
}