Skip to content

Commit

Permalink
Merge pull request #2 from senacor/feature/add-schema-validation
Browse files Browse the repository at this point in the history
Feature/add schema validation
  • Loading branch information
yannick-muenz authored Mar 21, 2024
2 parents f2d0174 + b1d0792 commit f6edbb4
Show file tree
Hide file tree
Showing 12 changed files with 230 additions and 92 deletions.
11 changes: 10 additions & 1 deletion package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

9 changes: 5 additions & 4 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,12 @@
"name": "chatbot-poc-backend",
"version": "1.0.0",
"description": "",
"main": "./dist/index.js",
"main": "./dist/app.js",
"type": "module",
"scripts": {
"dev": "nodemon -r dotenv/config --exec node --loader ts-node/esm src/index.ts",
"dev": "nodemon -r dotenv/config --exec node --loader ts-node/esm src/app.ts",
"build": "rimraf ./dist && tsc",
"start": "npm run build && node dist/index.js"
"start": "npm run build && node dist/app.js"
},
"repository": {
"type": "git",
Expand All @@ -24,7 +24,8 @@
"dotenv": "^16.4.5",
"express": "^4.18.3",
"officeparser": "^4.0.8",
"openai": "^4.29.2"
"openai": "^4.29.2",
"zod": "^3.22.4"
},
"devDependencies": {
"@tsconfig/strictest": "^2.0.3",
Expand Down
Binary file added prompts/json-format-prompt.docx
Binary file not shown.
14 changes: 14 additions & 0 deletions src/app.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
import "dotenv/config";
import express from "express";
import { registerRoutes } from "./index.js";

// Application entry point: build the Express app, wire up routes, start HTTP.
const app = express();

// Fall back to 3000 when PORT is unset or empty (env vars are always strings).
const port = process.env["PORT"] || 3000;

// Register middleware and routes BEFORE the server starts accepting
// connections; registering inside the listen() callback (as before) leaves a
// window in which early requests would 404.
registerRoutes();

app.listen(port, () => {
    console.log(`Server is running at http://localhost:${port}`);
});

export { app };
59 changes: 59 additions & 0 deletions src/chat/get-init.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
import { z } from "zod";
import { makeGetEndpoint } from "../middleware/validation/makeGetEndpoint.js";
import { fileReader, parseFileReaderResponse } from "../util/fileReader.js";
import { OPENAI_MODEL, PROMPT_FILE_NAME, RESPONSE_FORMAT, openai } from "./index.js";

//TODO: Rework type inference of fileReader
/**
 * GET /chat/init — loads the system prompt file(s), fires one chat completion
 * against OpenAI, and responds with the initial message history the frontend
 * should seed the conversation with.
 */
export const init = makeGetEndpoint(z.any(), async (_request, response) => {
    const messages: string[] = [];
    const promptFile = await fileReader(PROMPT_FILE_NAME);
    let jsonPrompt;

    // When the bot must answer as JSON, prepend the extra formatting prompt.
    if (RESPONSE_FORMAT.type === "json_object") {
        jsonPrompt = await fileReader('json-format-prompt.docx');
        if (parseFileReaderResponse(jsonPrompt)) {
            messages.push(jsonPrompt.content);
        } else {
            return response.status(500).send({
                status: 500,
                message: jsonPrompt.error
            });
        }
    }

    if (parseFileReaderResponse(promptFile)) {
        messages.push(promptFile.content);
    } else {
        return response.status(500).send({
            status: 500,
            message: promptFile.error
        });
    }

    // NOTE(review): the completion result is only logged, never returned —
    // presumably a warm-up/smoke call; confirm whether it can be dropped.
    const completion = await openai.chat.completions.create({
        messages: messages.map(message => ({ role: "system", content: message })),
        model: OPENAI_MODEL,
        response_format: RESPONSE_FORMAT
    });
    console.log(completion.choices[0]?.message);

    return response.status(200).send([
        {
            role: "system",
            content: promptFile.content
        },
        {
            // Empty string when no JSON prompt was loaded (text response mode).
            role: "system",
            content: jsonPrompt?.content ?? ""
        },
        {
            role: "assistant",
            content: "Hi, ich bin der virtuelle Assistent von Qonto und kann alles rund um Qonto und unsere Leistungen beantworten. Bitte fragen Sie mich etwas"
        }
    ]);
});
77 changes: 25 additions & 52 deletions src/chat/index.ts
Original file line number Diff line number Diff line change
@@ -1,59 +1,32 @@
import express, { Request, Response} from "express";
import express from "express";
import "dotenv/config";
import OpenAI from "openai";
import { ChatCompletionCreateParamsBase } from "openai/resources/chat/completions.js";
import { fileReader, hasErrors } from "../util/fileReader.js";
import { ChatCompletionCreateParams, ChatCompletionCreateParamsBase } from "openai/resources/chat/completions.js";
import { init } from "./get-init.js";
import { newMessage } from "./post-new-message.js";

/**
* Change the prompt file, the model or the response format here
*/
const PROMPT_FILE_NAME = "Chatbot Qonto.docx"
const OPENAI_MODEL: ChatCompletionCreateParamsBase["model"] = "gpt-3.5-turbo";
const RESPONSE_FORMAT: ChatCompletionCreateParams.ResponseFormat = {type: "text"};

export const chatRouter = express.Router();

export const openai = new OpenAI();


chatRouter.post('/newMessage', async (request: Request, response: Response) => {
const messages = request.body.messages;
const completion = await openai.chat.completions.create({
messages: messages,
model: OPENAI_MODEL,
});
const choice = completion.choices[0];
if(choice === undefined){
response.status(500).send({
status: 500,
message: "Expected an answer from the bot but got none."
})
}
else{
response.status(200).send(choice.message);
}
});

chatRouter.get('/init', async (_request: Request, response: Response) => {
const fileContent = await fileReader(PROMPT_FILE_NAME);
if(hasErrors(fileContent)){
console.log(fileContent.error.stack);
response.status(500).json({
status: 500,
message: 'Something went wrong'
});
}
else{
await openai.chat.completions.create({
messages: [{role: "system", content: fileContent.data}],
model: OPENAI_MODEL,
});
response.status(200).send([
{
role: "system",
content: fileContent.data
},
{
role: "assistant",
content: "Hi, ich bin der virtuelle Assistent von Qonto und kann alles rund um Qonto und unsere Leistungen beantworten. Bitte fragen Sie mich etwas"
}
]);
}
});

const chatRouter = express.Router();
const openai = new OpenAI();


chatRouter.post('/newMessage', newMessage);
chatRouter.get("/test")

chatRouter.get('/init', init);


export {
PROMPT_FILE_NAME,
OPENAI_MODEL,
RESPONSE_FORMAT,
chatRouter,
openai,
}
31 changes: 31 additions & 0 deletions src/chat/post-new-message.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
import { z } from "zod";
import { makePostEndpoint } from "../middleware/validation/makePostEndpoint.js";
import { OPENAI_MODEL, RESPONSE_FORMAT, openai } from "./index.js";

// Allowed chat roles, mirroring the OpenAI chat-completion role field.
const ChatCompletionRole = z.union([z.literal('user'), z.literal('system'), z.literal('assistant')]);
export type ChatCompletionRole = z.infer<typeof ChatCompletionRole>;

// Request-body schema for /newMessage: a non-empty conversation history.
const MessageHistory = z.object({
    messages: z.array(
        z.object({
            role: ChatCompletionRole,
            content: z.string(),
        })
    ).nonempty()
});
type MessageHistory = z.infer<typeof MessageHistory>;

/**
 * POST /chat/newMessage — forwards the validated message history to OpenAI
 * and returns the model's next message.
 */
export const newMessage = makePostEndpoint(MessageHistory, async (request, response) => {
    const messages = request.body.messages;
    const completion = await openai.chat.completions.create({
        messages,
        model: OPENAI_MODEL,
        response_format: RESPONSE_FORMAT
    });
    const chatResponse = completion.choices[0];
    if (!chatResponse) {
        // Structured error body, consistent with the other endpoints
        // (previously this sent a bare string).
        return response.status(500).send({
            status: 500,
            message: "Got no response from the bot"
        });
    }
    // Log only the message, not the whole choice object.
    console.log(chatResponse.message);
    return response.status(200).send(chatResponse.message);
});
29 changes: 12 additions & 17 deletions src/index.ts
Original file line number Diff line number Diff line change
@@ -1,26 +1,21 @@
import "dotenv/config";
import express, { Request, Response } from "express";
import cors from "cors";
import express from "express";
import { chatRouter } from "./chat/index.js";

const app = express();
const port = process.env["PORT"] || 3000;
import { app } from "./app.js";
import cors from "cors";


const corsOptions = {
origin: ["http://localhost:4200", "https://chatbot-frontend-csf37hag2a-ey.a.run.app"],
optionsSuccessStatus: 204
}

app.options('*', cors(corsOptions));
app.use(cors(corsOptions));
app.use(express.json());

app.use('/chat', chatRouter);
app.get("/", (_: Request, response: Response) => {
response.status(200).json({message: "Hello World, running in Container"});
})
export const registerRoutes = () => {
app.options('*', cors(corsOptions));
app.use(cors(corsOptions));


app.use(express.json());

app.use('/chat', chatRouter);
}

app.listen(port, () =>{
console.log(`Server is running at http://localhost:${port}`);
})
22 changes: 22 additions & 0 deletions src/middleware/validation/makeGetEndpoint.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
import { Request, Response } from "express"
import { z } from "zod"

/**
 * Wraps a GET handler with zod validation of the query string.
 * Responds 400 with zod's error message when validation fails; otherwise
 * invokes the handler with the (type-narrowed) request.
 */
export const makeGetEndpoint = <TQuery>(
    schema: z.Schema<TQuery>,
    callback: (
        req: Request<any, any, any, TQuery>,
        res: Response
    ) => void
) => (req: Request, res: Response) => {
    // GET endpoints carry their input in the query string, not the body —
    // name the result accordingly (was misleadingly "bodyValidation").
    const queryValidation = schema.safeParse(req.query);
    if (!queryValidation.success) {
        return res
            .status(400)
            .send({
                status: 400,
                message: queryValidation.error.message
            });
    }
    // NOTE(review): queryValidation.data (with zod defaults/transforms
    // applied) is discarded — the handler sees the raw req.query. Confirm
    // no schema relies on transforms before tightening this.
    return callback(req as Request<any, any, any, TQuery>, res);
}
22 changes: 22 additions & 0 deletions src/middleware/validation/makePostEndpoint.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
import { Request, Response } from "express"
import { z } from "zod"

/**
 * Wraps a POST handler with zod validation of the request body.
 * On failure the client receives a 400 carrying zod's error message;
 * on success the typed handler runs with the original request.
 */
export const makePostEndpoint = <TBody>(
    schema: z.Schema<TBody>,
    callback: (
        req: Request<any, any, TBody, any>,
        res: Response
    ) => void
) => (req: Request, res: Response) => {
    const parsed = schema.safeParse(req.body);
    if (parsed.success) {
        // Hand the request through untouched; the schema check above
        // guarantees req.body matches TBody.
        return callback(req, res);
    }
    return res.status(400).send({
        status: 400,
        message: parsed.error.message
    });
}
4 changes: 0 additions & 4 deletions src/models/message.ts

This file was deleted.

Loading

0 comments on commit f6edbb4

Please sign in to comment.