 import { NextRequest, NextResponse } from "next/server";
 import { GoogleGenerativeAI } from "@google/generative-ai";
+import OpenAI from "openai";
+import Anthropic from "@anthropic-ai/sdk";
 import fs from "fs";
 import path from "path";
 
-export async function POST(req: NextRequest) {
-  const envApiKey = process.env.GEMINI_API_KEY;
-  let apiKeyToUse = envApiKey;
+const modelToProvider: { [key: string]: string } = {
+  "gemini-2.5-flash": "Gemini",
+  "gemini-2.5-pro": "Gemini",
+  "gemini-2.5-flash-lite": "Gemini",
+  "gemini-1.5-flash": "Gemini",
+  "claude-3-7-sonnet-latest": "Claude",
+  "claude-opus-4-1-20250805": "Claude",
+  "claude-sonnet-4-20250514": "Claude",
+  "claude-3-haiku-20240307": "Claude",
+  "gpt-4o": "OpenAI",
+  "gpt-4-turbo": "OpenAI",
+  "gpt-3.5-turbo": "OpenAI",
+};
 
+export async function POST(req: NextRequest) {
   try {
     const {
       apiKey,
@@ -18,42 +31,52 @@ export async function POST(req: NextRequest) {
       temperature = 0.5,
     } = await req.json();
 
-    if (apiKey) {
-      apiKeyToUse = apiKey;
+    if (!currentPath) {
+      return NextResponse.json(
+        { error: "Current path is required" },
+        { status: 400 }
+      );
     }
 
-    if (!apiKeyToUse) {
+    if (!selectedModel) {
       return NextResponse.json(
-        { error: "API key not configured" },
+        { error: "Model selection is required" },
         { status: 400 }
       );
     }
 
-    if (!currentPath) {
+    const provider = modelToProvider[selectedModel];
+
+    if (!provider) {
       return NextResponse.json(
-        { error: "Current path is required" },
+        { error: "Invalid model selected" },
         { status: 400 }
       );
     }
 
-    if (!selectedModel) {
+    let apiKeyToUse: string | undefined;
+    if (apiKey) {
+      apiKeyToUse = apiKey;
+    } else {
+      if (provider === "Gemini") {
+        apiKeyToUse = process.env.GEMINI_API_KEY;
+      } else if (provider === "OpenAI") {
+        apiKeyToUse = process.env.OPENAI_API_KEY;
+      } else if (provider === "Claude") {
+        apiKeyToUse = process.env.ANTHROPIC_API_KEY;
+      }
+    }
+
+    if (!apiKeyToUse) {
       return NextResponse.json(
-        { error: "Model selection is required" },
+        { error: `API key for ${provider} not configured` },
         { status: 400 }
       );
     }
 
     const infoMdPath = path.join(process.cwd(), "info.md");
     const infoMdContent = fs.readFileSync(infoMdPath, "utf-8");
 
-    const genAI = new GoogleGenerativeAI(apiKeyToUse);
-    const model = genAI.getGenerativeModel({
-      model: selectedModel,
-      generationConfig: {
-        temperature: temperature,
-      },
-    });
-
     const systemInstructionsPath = path.join(
       process.cwd(),
       "src",
@@ -78,7 +101,7 @@ export async function POST(req: NextRequest) {
 **Available Image Assets:**
 You can use the following images in your design. Assume they are served from the '/image-assets' path. For example, to use 'profile.jpg', the path would STRICTLY be '/image-assets/profile.jpg'.
 ---
-${imageFiles.map((file) => `- ${file}`).join("\n")}---
+${imageFiles.map((file) => `- ${file}`).join("\n")}---
 `;
       }
     }
@@ -90,25 +113,74 @@ ${additionalInstructions}
 ---`
       : "";
 
-    const prompt = promptTemplate
-      .replace("{{infoMdContent}}", infoMdContent)
-      .replace("{{additionalInstructions}}", additionalInstructionsText)
-      .replace("{{imageAssets}}", imageAssetsText)
-      .replace(/{{currentPath}}/g, currentPath)
-      .replace("{{language}}", language);
-
-    const result = await model.generateContent(prompt);
-    const response = await result.response;
-    const text = await response.text();
+    let generatedContent: string = "";
 
     console.log(
       "Using model:",
       selectedModel,
+      "from provider:",
+      provider,
       "with temperature:",
       temperature
     );
 
-    return NextResponse.json({ generatedContent: text });
+    if (provider === "Gemini") {
+      const genAI = new GoogleGenerativeAI(apiKeyToUse);
+      const model = genAI.getGenerativeModel({
+        model: selectedModel,
+        generationConfig: {
+          temperature: temperature,
+        },
+      });
+      const prompt = promptTemplate
+        .replace("{{infoMdContent}}", infoMdContent)
+        .replace("{{additionalInstructions}}", additionalInstructionsText)
+        .replace("{{imageAssets}}", imageAssetsText)
+        .replace(/{{currentPath}}/g, currentPath)
+        .replace("{{language}}", language);
+      const result = await model.generateContent(prompt);
+      const response = await result.response;
+      generatedContent = await response.text();
+    } else if (provider === "OpenAI") {
+      const openai = new OpenAI({ apiKey: apiKeyToUse });
+      const systemPrompt = promptTemplate
+        .replace("{{infoMdContent}}", "")
+        .replace("{{additionalInstructions}}", "")
+        .replace("{{imageAssets}}", "")
+        .replace(/{{currentPath}}/g, currentPath)
+        .replace("{{language}}", language);
+      const userPrompt = `Here is the raw information about the candidate:\n---\n${infoMdContent}\n---\n${additionalInstructionsText}\n${imageAssetsText}`;
+      const completion = await openai.chat.completions.create({
+        model: selectedModel,
+        temperature: temperature,
+        messages: [
+          { role: "system", content: systemPrompt },
+          { role: "user", content: userPrompt },
+        ],
+      });
+      generatedContent = completion.choices[0].message.content ?? "";
+    } else if (provider === "Claude") {
+      const anthropic = new Anthropic({ apiKey: apiKeyToUse });
+      const systemPrompt = promptTemplate
+        .replace("{{infoMdContent}}", "")
+        .replace("{{additionalInstructions}}", "")
+        .replace("{{imageAssets}}", "")
+        .replace(/{{currentPath}}/g, currentPath)
+        .replace("{{language}}", language);
+      const userPrompt = `Here is the raw information about the candidate:\n---\n${infoMdContent}\n---\n${additionalInstructionsText}\n${imageAssetsText}`;
+      const message = await anthropic.messages.create({
+        model: selectedModel,
+        max_tokens: 4096,
+        temperature: temperature,
+        system: systemPrompt,
+        messages: [{ role: "user", content: userPrompt }],
+      });
+      if (message.content[0].type === "text") {
+        generatedContent = message.content[0].text;
+      }
+    }
+
+    return NextResponse.json({ generatedContent });
   } catch (error) {
     console.error("Error generating content:", error);
     return NextResponse.json(
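
For reference, a minimal sketch of how a client might call the updated route. The path "/api/generate" is an assumption (the route's file path is not shown in this diff), and the request body also carries other prompt-related fields that are elided by the hunks above. Only what the diff confirms is used here: selectedModel must be a key of modelToProvider, currentPath is required, temperature defaults to 0.5, and when apiKey is omitted the route falls back to GEMINI_API_KEY, OPENAI_API_KEY, or ANTHROPIC_API_KEY depending on the resolved provider.

// Hypothetical caller; adjust "/api/generate" to the route's actual path.
async function generatePage(currentPath: string): Promise<string> {
  const res = await fetch("/api/generate", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      currentPath,                               // required, otherwise 400 "Current path is required"
      selectedModel: "claude-sonnet-4-20250514", // must be a key of modelToProvider
      temperature: 0.5,                          // optional, defaults to 0.5
      // apiKey omitted: the route falls back to the provider's environment
      // variable (GEMINI_API_KEY, OPENAI_API_KEY, or ANTHROPIC_API_KEY).
    }),
  });

  if (!res.ok) {
    // Error responses carry an { error } payload, e.g. "Invalid model selected".
    const { error } = await res.json();
    throw new Error(error);
  }

  const { generatedContent } = await res.json();
  return generatedContent;
}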