Skip to content

Commit

Permalink
feat: updated gpt4 and using official sdk everywhere (#3645)
Browse files Browse the repository at this point in the history
* feat: updated gpt4 and using official sdk everywhere

* fix(api): remove duplicated comment in AI endpoint
  • Loading branch information
SiTaggart authored Dec 11, 2023
1 parent 6a4f6e1 commit 8bb04a8
Show file tree
Hide file tree
Showing 5 changed files with 39 additions and 84 deletions.
5 changes: 2 additions & 3 deletions packages/paste-website/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@
"@types/lodash": "^4.14.182",
"@types/mdx-js__react": "^1.5.5",
"@vercel/og": "^0.5.20",
"ai": "^2.2.13",
"ai": "^2.2.27",
"airtable": "^0.11.6",
"color": "^3.1.2",
"common-tags": "^1.8.2",
Expand All @@ -160,8 +160,7 @@
"micromark-extension-mdxjs": "^2.0.0",
"minimist": "^1.2.8",
"next": "^14.0.0",
"openai": "^4.18.0",
"openai-edge": "^1.2.2",
"openai": "^4.20.1",
"pretty-format": "^28.1.0",
"prism-react-renderer": "^1.3.5",
"puppeteer-core": "^19.6.1",
Expand Down
45 changes: 17 additions & 28 deletions packages/paste-website/src/pages/api/ai.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,13 +18,7 @@ import { OpenAIStream, StreamingTextResponse } from "ai";
import { codeBlock, oneLine } from "common-tags";
import GPT3Tokenizer from "gpt3-tokenizer";
import type { NextRequest } from "next/server";
import {
type ChatCompletionRequestMessage,
Configuration,
type CreateEmbeddingResponse,
type CreateModerationResponse,
OpenAIApi,
} from "openai-edge";
import OpenAI from "openai";

class ApplicationError extends Error {
// eslint-disable-next-line @typescript-eslint/no-parameter-properties
Expand All @@ -39,10 +33,9 @@ const openAiSecret = process.env.OPENAI_API_SECRET;
const supabaseUrl = process.env.SUPABASE_URL;
const supabaseServiceKey = process.env.SUPABASE_KEY;

const config = new Configuration({
const openai = new OpenAI({
apiKey: openAiKey,
});
const openai = new OpenAIApi(config);

/**
* Because we're using an edge function for streaming we can't use winston for logging
Expand Down Expand Up @@ -102,9 +95,9 @@ export default async function handler(req: NextRequest): Promise<void | Response
console.log(`${LOG_PREFIX} Moderate user prompt`);

// Moderate the content to comply with OpenAI T&C
const moderationResponse: CreateModerationResponse = await openai
.createModeration({ input: sanitizedQuery })
.then((res: any) => res.json());
const moderationResponse: OpenAI.ModerationCreateResponse = await openai.moderations.create({
input: sanitizedQuery,
});

// @ts-expect-error this is a bug in the types
if (moderationResponse.error) {
Expand All @@ -126,18 +119,16 @@ export default async function handler(req: NextRequest): Promise<void | Response
console.log(`${LOG_PREFIX} Reqesting openai embedding`);

// Create embedding from query
const embeddingResponse = await openai.createEmbedding({
const embeddingResponse = await openai.embeddings.create({
model: "text-embedding-ada-002",
input: sanitizedQuery.replaceAll("\n", " "),
});

if (embeddingResponse.status !== 200) {
if (embeddingResponse.data.length === 0) {
throw new ApplicationError("Failed to create embedding for question", embeddingResponse);
}

const {
data: [{ embedding }],
}: CreateEmbeddingResponse = await embeddingResponse.json();
const { embedding } = embeddingResponse.data[0];

// eslint-disable-next-line no-console
console.log(`${LOG_PREFIX} Request Page sections based on embeddings`);
Expand Down Expand Up @@ -187,37 +178,35 @@ export default async function handler(req: NextRequest): Promise<void | Response
"Sorry, I don't know how to help with that."
`}
Include as many related code snippets inside your answer as needed if available, and links to the documentation using
the full https://paste.twilio.design domain.
Do not wrap your answer in any markdown or code blocks, just return the answer as plain text.
Context sections:
${contextText}
Question: """
${sanitizedQuery}
"""
Answer as markdown (including related code snippets if available):
`;

const chatMessage: ChatCompletionRequestMessage = {
const chatMessage: OpenAI.Chat.ChatCompletionMessageParam = {
role: "user",
content: prompt,
};
// eslint-disable-next-line no-console
console.log(`${LOG_PREFIX} Request chat completion`);

const response = await openai.createChatCompletion({
model: "gpt-4",
const response = await openai.chat.completions.create({
model: "gpt-4-1106-preview",
messages: [chatMessage],
// eslint-disable-next-line camelcase
max_tokens: 512,
max_tokens: 2000,
temperature: 0,
stream: true,
});

if (!response.ok) {
const error = await response.json();
throw new ApplicationError("Failed to generate completion", error);
}

// eslint-disable-next-line no-console
console.log(`${LOG_PREFIX} Open ai Returned response`);

Expand Down
13 changes: 5 additions & 8 deletions packages/paste-website/src/pages/api/discussions-search.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
/* eslint-disable max-classes-per-file */
import { createClient } from "@supabase/supabase-js";
import type { NextApiRequest, NextApiResponse } from "next";
import { Configuration, type CreateEmbeddingResponse, OpenAIApi } from "openai-edge";
import OpenAI from "openai";
import Rollbar from "rollbar";

import { logger } from "../../functions-utils/logger";
Expand All @@ -24,10 +24,9 @@ const openAiSecret = process.env.OPENAI_API_SECRET;
const supabaseUrl = process.env.SUPABASE_URL;
const supabaseServiceKey = process.env.SUPABASE_KEY;

const config = new Configuration({
const openai = new OpenAI({
apiKey: openAiKey,
});
const openai = new OpenAIApi(config);

const LOG_PREFIX = "[/api/discussions-search]:";

Expand Down Expand Up @@ -71,18 +70,16 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
logger.info(`${LOG_PREFIX} Reqesting openai embedding`);

// Create embedding from query
const embeddingResponse = await openai.createEmbedding({
const embeddingResponse = await openai.embeddings.create({
model: "text-embedding-ada-002",
input: sanitizedQuery.replaceAll("\n", " "),
});

if (embeddingResponse.status !== 200) {
if (embeddingResponse.data.length === 0) {
throw new ApplicationError("Failed to create embedding for question", embeddingResponse);
}

const {
data: [{ embedding }],
}: CreateEmbeddingResponse = await embeddingResponse.json();
const { embedding } = embeddingResponse.data[0];

logger.info(`${LOG_PREFIX} Request Discussion sections based on embeddings`);

Expand Down
13 changes: 5 additions & 8 deletions packages/paste-website/src/pages/api/docs-search.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
/* eslint-disable max-classes-per-file */
import { createClient } from "@supabase/supabase-js";
import type { NextApiRequest, NextApiResponse } from "next";
import { Configuration, type CreateEmbeddingResponse, OpenAIApi } from "openai-edge";
import OpenAI from "openai";
import Rollbar from "rollbar";

import { logger } from "../../functions-utils/logger";
Expand All @@ -24,10 +24,9 @@ const openAiKey = process.env.OPENAI_API_KEY;
const supabaseUrl = process.env.SUPABASE_URL;
const supabaseServiceKey = process.env.SUPABASE_KEY;

const config = new Configuration({
const openai = new OpenAI({
apiKey: openAiKey,
});
const openai = new OpenAIApi(config);

const LOG_PREFIX = "[/api/docs-search]:";

Expand Down Expand Up @@ -68,18 +67,16 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
logger.info(`${LOG_PREFIX} Reqesting openai embedding`);

// Create embedding from query
const embeddingResponse = await openai.createEmbedding({
const embeddingResponse = await openai.embeddings.create({
model: "text-embedding-ada-002",
input: sanitizedQuery.replaceAll("\n", " "),
});

if (embeddingResponse.status !== 200) {
if (embeddingResponse.data.length === 0) {
throw new ApplicationError("Failed to create embedding for question", embeddingResponse);
}

const {
data: [{ embedding }],
}: CreateEmbeddingResponse = await embeddingResponse.json();
const { embedding } = embeddingResponse.data[0];

logger.info(`${LOG_PREFIX} Request page sections based on embeddings`);

Expand Down
47 changes: 10 additions & 37 deletions yarn.lock
Original file line number Diff line number Diff line change
Expand Up @@ -14403,7 +14403,7 @@ __metadata:
"@types/lodash": ^4.14.182
"@types/mdx-js__react": ^1.5.5
"@vercel/og": ^0.5.20
ai: ^2.2.13
ai: ^2.2.27
airtable: ^0.11.6
color: ^3.1.2
common-tags: ^1.8.2
Expand All @@ -14425,8 +14425,7 @@ __metadata:
micromark-extension-mdxjs: ^2.0.0
minimist: ^1.2.8
next: ^14.0.0
openai: ^4.18.0
openai-edge: ^1.2.2
openai: ^4.20.1
pretty-format: ^28.1.0
prism-react-renderer: ^1.3.5
puppeteer-core: ^19.6.1
Expand Down Expand Up @@ -16636,13 +16635,12 @@ __metadata:
languageName: node
linkType: hard

"ai@npm:^2.2.13":
version: 2.2.13
resolution: "ai@npm:2.2.13"
"ai@npm:^2.2.27":
version: 2.2.27
resolution: "ai@npm:2.2.27"
dependencies:
eventsource-parser: 1.0.0
nanoid: 3.3.6
openai: 4.2.0
solid-swr-store: 0.10.7
sswr: 2.0.0
swr: 2.2.0
Expand All @@ -16662,7 +16660,7 @@ __metadata:
optional: true
vue:
optional: true
checksum: a9217e8d3b8de1e8ea313d5a9a51956ba789060625f8f6dcbbd91933ad42993dd35f1d6fa64c92b8c3a6701afc817297d3630bd09f963f7f6011bb063d570962
checksum: 45f6378ec0facd8e07d7b362b58b480d660a6e94e8102e8d2909bc97e1d9fc240284a6f5cc856e6a5d7c812cda8d4aa99ad7a2bb402d37788b5483f56adda9b7
languageName: node
linkType: hard

Expand Down Expand Up @@ -34164,34 +34162,9 @@ fsevents@^1.2.7:
languageName: node
linkType: hard

"openai-edge@npm:^1.2.2":
version: 1.2.2
resolution: "openai-edge@npm:1.2.2"
checksum: 9d6ff6e79fbad5a0f2b6dd5db75abeb489dfa530f3e14a664500856497c1e19f6ec24036c2988cd3194f3ce38325214112d6f4c9766743497a7e1c69bf2f5f39
languageName: node
linkType: hard

"openai@npm:4.2.0":
version: 4.2.0
resolution: "openai@npm:4.2.0"
dependencies:
"@types/node": ^18.11.18
"@types/node-fetch": ^2.6.4
abort-controller: ^3.0.0
agentkeepalive: ^4.2.1
digest-fetch: ^1.3.0
form-data-encoder: 1.7.2
formdata-node: ^4.3.2
node-fetch: ^2.6.7
bin:
openai: bin/cli
checksum: c2b28e422d7ea6a38a2d98d1c9c933819386d560e473ebcf4417d0cbd80a79f05392120ae32da8b8250732b62469a08270b9b4864e9ff2965aa1af897b986867
languageName: node
linkType: hard

"openai@npm:^4.18.0":
version: 4.18.0
resolution: "openai@npm:4.18.0"
"openai@npm:^4.20.1":
version: 4.20.1
resolution: "openai@npm:4.20.1"
dependencies:
"@types/node": ^18.11.18
"@types/node-fetch": ^2.6.4
Expand All @@ -34204,7 +34177,7 @@ fsevents@^1.2.7:
web-streams-polyfill: ^3.2.1
bin:
openai: bin/cli
checksum: 485193c566dab5f98515e7c74060ed4a81246f569a9502e049015992148ecd912c99deed187f613ea7f28a551bcad2a4ab0998e0ff8a95acae8f996484f15d62
checksum: 01d2aeaf9c1a0e93159cd49207d86188edf43a6276aeb1a4aab8614727f64591452bbe19baa344f799d0a94314ff386e39c019b62f305543952606759a3927f1
languageName: node
linkType: hard

Expand Down

1 comment on commit 8bb04a8

@vercel
Copy link

@vercel vercel bot commented on 8bb04a8 Dec 11, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please sign in to comment.