diff --git a/src/app/api/pr/qnaGen/route.ts b/src/app/api/pr/qnaGen/route.ts
new file mode 100644
index 00000000..06528389
--- /dev/null
+++ b/src/app/api/pr/qnaGen/route.ts
@@ -0,0 +1,65 @@
+// src/app/api/pr/qnaGen/route.ts
+'use server';
+import { NextRequest, NextResponse } from 'next/server';
+import fetch from 'node-fetch';
+import https from 'https';
+
+export async function POST(req: NextRequest) {
+  try {
+    console.log('Received POST request');
+
+    const { question, systemRole } = await req.json();
+    console.log('Parsed request JSON:', { question, systemRole });
+
+    const apiURL = 'https://granite-7b-lab-vllm-openai.apps.fmaas-backend.fmaas.res.ibm.com';
+    const modelName = 'instructlab/granite-7b-lab';
+
+    const messages = [
+      { role: 'system', content: systemRole },
+      { role: 'user', content: question }
+    ];
+
+    const requestData = {
+      model: modelName,
+      messages,
+      stream: false // Disable streaming
+    };
+
+    console.log('Request data prepared for API call:', requestData);
+
+    const agent = new https.Agent({
+      rejectUnauthorized: false // NOTE: disables TLS certificate verification for this request
+    });
+
+    const chatResponse = await fetch(`${apiURL}/v1/chat/completions`, {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+        accept: 'application/json'
+      },
+      body: JSON.stringify(requestData),
+      agent: apiURL.startsWith('https') ? agent : undefined
+    });
+
+    console.log('API call made to:', `${apiURL}/v1/chat/completions`);
+
+    if (!chatResponse.ok) {
+      console.error('Failed to fetch chat response. Status:', chatResponse.status);
+      const errorText = await chatResponse.text();
+      console.error('Response text:', errorText);
+      return new NextResponse('Failed to fetch chat response', { status: chatResponse.status });
+    }
+
+    const result = await chatResponse.json(); // Wait for the complete response
+    console.log('Received response from API:', result);
+
+    return new NextResponse(JSON.stringify(result), {
+      headers: {
+        'Content-Type': 'application/json'
+      }
+    });
+  } catch (error) {
+    console.error('Error processing request:', error);
+    return new NextResponse('Error processing request', { status: 500 });
+  }
+}
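Reviewer note: this is roughly the request/response contract the new route implements, as implied by the handler above. The field names mirror the OpenAI-style chat completions schema it proxies; the interface names and comments below are illustrative only, not part of the change.

```ts
// Illustrative sketch only (not part of the diff): shapes inferred from the handler above.
// POST /api/pr/qnaGen
interface QnaGenRequest {
  question: string; // forwarded upstream as the `user` message content
  systemRole: string; // forwarded upstream as the `system` message content
}

// The route returns the upstream /v1/chat/completions body unchanged,
// so callers read the generated text from choices[0].message.content.
interface QnaGenResponse {
  choices: Array<{
    message: { role: string; content: string };
  }>;
}
```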
diff --git a/src/components/Contribute/Knowledge/index.tsx b/src/components/Contribute/Knowledge/index.tsx
index 33b9c5aa..0fbe5699 100644
--- a/src/components/Contribute/Knowledge/index.tsx
+++ b/src/components/Contribute/Knowledge/index.tsx
@@ -1,8 +1,8 @@
-// src/components/Contribute/Knowledge/index.tsx
 'use client';
 import React, { useEffect, useMemo, useState } from 'react';
 import './knowledge.css';
 import { Alert, AlertActionCloseButton } from '@patternfly/react-core/dist/dynamic/components/Alert';
+import { Modal } from '@patternfly/react-core/dist/esm/components/Modal/Modal';
 import { ActionGroup } from '@patternfly/react-core/dist/dynamic/components/Form';
 import { Form } from '@patternfly/react-core/dist/dynamic/components/Form';
 import { getGitHubUsername } from '../../../utils/github';
@@ -121,7 +121,8 @@ export const KnowledgeForm: React.FunctionComponent = ({ kno
   const [disableAction, setDisableAction] = useState(true);
   const [reset, setReset] = useState(false);
-
+  const [modalOpen, setModalOpen] = useState(false);
+  const [modalContent, setModalContent] = useState<React.ReactNode>('');
   const router = useRouter();
 
   const emptySeedExample: SeedExample = {
@@ -331,6 +332,53 @@ export const KnowledgeForm: React.FunctionComponent = ({ kno
     );
   };
 
+  const handleGenerateQAPairs = async (seedExampleIndex: number) => {
+    const context = seedExamples[seedExampleIndex].context;
+    const userContent = `Generate 3 question and answer pairs from the provided context. The output should be in the form of "Question 1" and "Answer 1" and next "Question 2" and "Answer 2" and then "Question 3" and "Answer 3". Only reply with the question and answers, no other content or commentary. Here is the context: ${context}`;
+
+    try {
+      const response = await fetch('/api/pr/qnaGen', {
+        method: 'POST',
+        headers: {
+          'Content-Type': 'application/json'
+        },
+        body: JSON.stringify({
+          question: userContent,
+          systemRole: 'user'
+        })
+      });
+
+      if (!response.ok) {
+        throw new Error('Failed to generate Q&A pairs');
+      }
+
+      const result = await response.json();
+      const generatedContent = result.choices[0].message.content;
+
+      // Parse the QNAs from the LLM response
+      const qaPairs = generatedContent.match(/(Question \d+:.*?Answer \d+:.*?)(?=Question \d+:|$)/gs);
+
+      if (qaPairs) {
+        // Format the QNA pairs
+        const formattedContent = qaPairs.map((pair, index) => (
+          <div key={index}>
+            <p>
+              <strong>{pair.split('Answer ')[0].trim()}</strong>
+            </p>
+            <p>
+              {pair.split('Answer ')[1].trim()}
+            </p>
+          </div>
+        ));
+        setModalContent(formattedContent);
+      } else {
+        setModalContent('Failed to parse the response from the model.');
+      }
+
+      setModalOpen(true);
+    } catch (error) {
+      console.error('Error generating Q&A pairs:', error);
+      setModalContent('Error generating Q&A pairs.');
+      setModalOpen(true);
+    }
+  };
+
   const onCloseActionGroupAlert = () => {
     setActionGroupAlertContent(undefined);
   };
@@ -352,8 +400,6 @@ export const KnowledgeForm: React.FunctionComponent = ({ kno
     setFilePath('');
     setSeedExamples([emptySeedExample, emptySeedExample, emptySeedExample, emptySeedExample, emptySeedExample]);
     setDisableAction(true);
-
-    // setReset is just reset button, value has no impact.
     setReset(reset ? false : true);
   };
 
@@ -439,6 +485,15 @@ export const KnowledgeForm: React.FunctionComponent = ({ kno
           handleAnswerBlur={handleAnswerBlur}
         />
+          {/* Generate Q&A Button for each Seed Example, TODO: figure out how to nest the buttons under context */}
+          {seedExamples.map((_, index) => (
+            <div key={index}>
+              <Button variant="primary" onClick={() => handleGenerateQAPairs(index)}>
+                Generate Q&A Pairs
+              </Button>
+            </div>
+          ))}
+
+          {/* Modal for Q&A Pair Results */}
+          <Modal isOpen={modalOpen} onClose={() => setModalOpen(false)} variant="small">
+            {modalContent}
+          </Modal>
     );
   };
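Reviewer note: as a quick sanity check on the parsing in handleGenerateQAPairs, here is the regex and split from the diff applied to a made-up completion. The sample text is invented for illustration; the regex is copied verbatim from the change above.

```ts
// Sample completion invented for illustration; the regex matches the one used in handleGenerateQAPairs.
const sample = `Question 1: What is the capital of France?
Answer 1: Paris.
Question 2: Which river runs through Paris?
Answer 2: The Seine.`;

const qaPairs = sample.match(/(Question \d+:.*?Answer \d+:.*?)(?=Question \d+:|$)/gs);
// qaPairs has two elements, each holding one "Question N: ... Answer N: ..." block.

const first = qaPairs![0];
console.log(first.split('Answer ')[0].trim()); // "Question 1: What is the capital of France?"
console.log(first.split('Answer ')[1].trim()); // "1: Paris." (the split consumes the literal "Answer " prefix)
```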