From aaf43c6087b13aeb13a29ab470e465847894daf7 Mon Sep 17 00:00:00 2001
From: Chenchen
Date: Tue, 3 Sep 2024 12:23:54 +0800
Subject: [PATCH 1/2] fix: add length check for toolRequestParts in
 `fromOpenAiChoice`

---
 plugins/openai/src/gpt.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/openai/src/gpt.ts b/plugins/openai/src/gpt.ts
index 39bad265..ab6483f2 100644
--- a/plugins/openai/src/gpt.ts
+++ b/plugins/openai/src/gpt.ts
@@ -376,7 +376,7 @@ export function fromOpenAiChoice(
     finishReason: finishReasonMap[choice.finish_reason] || 'other',
     message: {
       role: 'model',
-      content: toolRequestParts
+      content: toolRequestParts && toolRequestParts.length > 0
         ? // Note: Not sure why I have to cast here exactly.
           // Otherwise it thinks toolRequest must be 'undefined' if provided
           (toolRequestParts as ToolRequestPart[])
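
Note on the change above: in JavaScript an empty array is truthy, so the old
truthiness check selected toolRequestParts as the message content even when
the model returned no tool calls, producing an empty content array and
dropping the text response. The added `.length > 0` guard makes the code fall
through to the text branch instead. A minimal TypeScript sketch of the
selection behavior (the part types and the pickContent helper are simplified
stand-ins for illustration, not Genkit's actual definitions):

    type ToolRequestPart = { toolRequest: { name: string; input?: unknown } };
    type TextPart = { text: string };

    // Choose the tool-call parts only when at least one is present;
    // otherwise fall back to the plain text part.
    function pickContent(
      toolRequestParts: ToolRequestPart[] | undefined,
      text: string
    ): Array<ToolRequestPart | TextPart> {
      return toolRequestParts && toolRequestParts.length > 0
        ? toolRequestParts
        : [{ text }];
    }

    // Before the fix, pickContent([], 'hello') effectively returned [];
    // after it, the text part [{ text: 'hello' }] is returned.
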
From 949e3f0d1b4213613b696a28d27952900c18c096 Mon Sep 17 00:00:00 2001
From: Chenchen
Date: Wed, 4 Sep 2024 09:19:07 +0800
Subject: [PATCH 2/2] add built lib

---
 .gitignore | 2 +-
 examples/lib/genkit.config.js | 20 +
 examples/lib/genkit.config.js.map | 1 +
 examples/lib/index.js | 127 ++++
 examples/lib/index.js.map | 1 +
 package-lock.json | 3 -
 plugins/convex/lib/index.d.mts | 28 +
 plugins/convex/lib/index.d.ts | 28 +
 plugins/convex/lib/index.js | 62 ++
 plugins/convex/lib/index.js.map | 1 +
 plugins/convex/lib/index.mjs | 38 ++
 plugins/convex/lib/index.mjs.map | 1 +
 plugins/hnsw/lib/actions/index.d.mts | 22 +
 plugins/hnsw/lib/actions/index.d.ts | 22 +
 plugins/hnsw/lib/actions/index.js | 46 ++
 plugins/hnsw/lib/actions/index.js.map | 1 +
 plugins/hnsw/lib/actions/index.mjs | 21 +
 plugins/hnsw/lib/actions/index.mjs.map | 1 +
 plugins/hnsw/lib/config/index.d.mts | 48 ++
 plugins/hnsw/lib/config/index.d.ts | 48 ++
 plugins/hnsw/lib/config/index.js | 58 ++
 plugins/hnsw/lib/config/index.js.map | 1 +
 plugins/hnsw/lib/config/index.mjs | 31 +
 plugins/hnsw/lib/config/index.mjs.map | 1 +
 plugins/hnsw/lib/constants/index.d.mts | 31 +
 plugins/hnsw/lib/constants/index.d.ts | 31 +
 plugins/hnsw/lib/constants/index.js | 68 ++
 plugins/hnsw/lib/constants/index.js.map | 1 +
 plugins/hnsw/lib/constants/index.mjs | 31 +
 plugins/hnsw/lib/constants/index.mjs.map | 1 +
 plugins/hnsw/lib/index.d.mts | 7 +
 plugins/hnsw/lib/index.d.ts | 7 +
 plugins/hnsw/lib/index.js | 56 ++
 plugins/hnsw/lib/index.js.map | 1 +
 plugins/hnsw/lib/index.mjs | 31 +
 plugins/hnsw/lib/index.mjs.map | 1 +
 plugins/hnsw/lib/indexer/index.d.mts | 21 +
 plugins/hnsw/lib/indexer/index.d.ts | 21 +
 plugins/hnsw/lib/indexer/index.js | 103 ++++
 plugins/hnsw/lib/indexer/index.js.map | 1 +
 plugins/hnsw/lib/indexer/index.mjs | 73 +++
 plugins/hnsw/lib/indexer/index.mjs.map | 1 +
 plugins/hnsw/lib/interfaces/index.d.mts | 35 ++
 plugins/hnsw/lib/interfaces/index.d.ts | 35 ++
 plugins/hnsw/lib/interfaces/index.js | 17 +
 plugins/hnsw/lib/interfaces/index.js.map | 1 +
 plugins/hnsw/lib/interfaces/index.mjs | 1 +
 plugins/hnsw/lib/interfaces/index.mjs.map | 1 +
 plugins/hnsw/lib/retriever/index.d.mts | 21 +
 plugins/hnsw/lib/retriever/index.d.ts | 21 +
 plugins/hnsw/lib/retriever/index.js | 92 +++
 plugins/hnsw/lib/retriever/index.js.map | 1 +
 plugins/hnsw/lib/retriever/index.mjs | 68 ++
 plugins/hnsw/lib/retriever/index.mjs.map | 1 +
 plugins/hnsw/lib/types/index.d.mts | 21 +
 plugins/hnsw/lib/types/index.d.ts | 21 +
 plugins/hnsw/lib/types/index.js | 17 +
 plugins/hnsw/lib/types/index.js.map | 1 +
 plugins/hnsw/lib/types/index.mjs | 1 +
 plugins/hnsw/lib/types/index.mjs.map | 1 +
 plugins/hnsw/lib/utilities/index.d.mts | 23 +
 plugins/hnsw/lib/utilities/index.d.ts | 23 +
 plugins/hnsw/lib/utilities/index.js | 43 ++
 plugins/hnsw/lib/utilities/index.js.map | 1 +
 plugins/hnsw/lib/utilities/index.mjs | 18 +
 plugins/hnsw/lib/utilities/index.mjs.map | 1 +
 plugins/openai/.gitignore | 2 +-
 plugins/openai/lib/chunk-WFI2LP4G.mjs | 51 ++
 plugins/openai/lib/chunk-WFI2LP4G.mjs.map | 1 +
 plugins/openai/lib/dalle.d.mts | 84 +++
 plugins/openai/lib/dalle.d.ts | 84 +++
 plugins/openai/lib/dalle.js | 146 +++++
 plugins/openai/lib/dalle.js.map | 1 +
 plugins/openai/lib/dalle.mjs | 92 +++
 plugins/openai/lib/dalle.mjs.map | 1 +
 plugins/openai/lib/embedder-DTnK2FJN.d.ts | 157 +++++
 plugins/openai/lib/embedder-DZYwphxr.d.mts | 157 +++++
 plugins/openai/lib/embedder.d.mts | 11 +
 plugins/openai/lib/embedder.d.ts | 11 +
 plugins/openai/lib/embedder.js | 145 +++++
 plugins/openai/lib/embedder.js.map | 1 +
 plugins/openai/lib/embedder.mjs | 88 +++
 plugins/openai/lib/embedder.mjs.map | 1 +
 plugins/openai/lib/gpt.d.mts | 682 +++++++++++++++++++++
 plugins/openai/lib/gpt.d.ts | 682 +++++++++++++++++++++
 plugins/openai/lib/gpt.js | 521 ++++++++++++++++
 plugins/openai/lib/gpt.js.map | 1 +
 plugins/openai/lib/gpt.mjs | 440 +++++++++++++
 plugins/openai/lib/gpt.mjs.map | 1 +
 plugins/openai/lib/index.d.mts | 11 +
 plugins/openai/lib/index.d.ts | 11 +
 plugins/openai/lib/index.js | 119 ++++
 plugins/openai/lib/index.js.map | 1 +
 plugins/openai/lib/index.mjs | 70 +++
 plugins/openai/lib/index.mjs.map | 1 +
 plugins/openai/lib/tts.d.mts | 178 ++++++
 plugins/openai/lib/tts.d.ts | 178 ++++++
 plugins/openai/lib/tts.js | 182 ++++++
 plugins/openai/lib/tts.js.map | 1 +
 plugins/openai/lib/tts.mjs | 125 ++++
 plugins/openai/lib/tts.mjs.map | 1 +
 plugins/openai/lib/whisper.d.mts | 72 +++
 plugins/openai/lib/whisper.d.ts | 72 +++
 plugins/openai/lib/whisper.js | 166 +++++
 plugins/openai/lib/whisper.js.map | 1 +
 plugins/openai/lib/whisper.mjs | 112 ++++
 plugins/openai/lib/whisper.mjs.map | 1 +
 107 files changed, 6220 insertions(+), 5 deletions(-)
 create mode 100644 examples/lib/genkit.config.js
 create mode 100644 examples/lib/genkit.config.js.map
 create mode 100644 examples/lib/index.js
 create mode 100644 examples/lib/index.js.map
 create mode 100644 plugins/convex/lib/index.d.mts
 create mode 100644 plugins/convex/lib/index.d.ts
 create mode 100644 plugins/convex/lib/index.js
 create mode 100644 plugins/convex/lib/index.js.map
 create mode 100644 plugins/convex/lib/index.mjs
 create mode 100644 plugins/convex/lib/index.mjs.map
 create mode 100644 plugins/hnsw/lib/actions/index.d.mts
 create mode 100644 plugins/hnsw/lib/actions/index.d.ts
 create mode 100644 plugins/hnsw/lib/actions/index.js
 create mode 100644 plugins/hnsw/lib/actions/index.js.map
 create mode 100644 plugins/hnsw/lib/actions/index.mjs
 create mode 100644 plugins/hnsw/lib/actions/index.mjs.map
 create mode 100644 plugins/hnsw/lib/config/index.d.mts
 create mode 100644 plugins/hnsw/lib/config/index.d.ts
 create mode 100644 plugins/hnsw/lib/config/index.js
 create mode 100644 plugins/hnsw/lib/config/index.js.map
 create mode 100644 plugins/hnsw/lib/config/index.mjs
 create mode 100644 plugins/hnsw/lib/config/index.mjs.map
 create mode 100644 plugins/hnsw/lib/constants/index.d.mts
 create mode 100644 plugins/hnsw/lib/constants/index.d.ts
 create mode 100644 plugins/hnsw/lib/constants/index.js
 create mode 100644 plugins/hnsw/lib/constants/index.js.map
 create mode 100644 plugins/hnsw/lib/constants/index.mjs
 create mode 100644 plugins/hnsw/lib/constants/index.mjs.map
 create mode 100644 plugins/hnsw/lib/index.d.mts
 create mode 100644 plugins/hnsw/lib/index.d.ts
 create mode 100644 plugins/hnsw/lib/index.js
 create mode 100644 plugins/hnsw/lib/index.js.map
 create mode 100644 plugins/hnsw/lib/index.mjs
 create mode 100644 plugins/hnsw/lib/index.mjs.map
 create mode 100644 plugins/hnsw/lib/indexer/index.d.mts
 create mode 100644 plugins/hnsw/lib/indexer/index.d.ts
 create mode 100644 plugins/hnsw/lib/indexer/index.js
 create mode 100644 plugins/hnsw/lib/indexer/index.js.map
 create mode 100644 plugins/hnsw/lib/indexer/index.mjs
 create mode 100644 plugins/hnsw/lib/indexer/index.mjs.map
 create mode 100644 plugins/hnsw/lib/interfaces/index.d.mts
 create mode 100644 plugins/hnsw/lib/interfaces/index.d.ts
 create mode 100644 plugins/hnsw/lib/interfaces/index.js
 create mode 100644 plugins/hnsw/lib/interfaces/index.js.map
 create mode 100644 plugins/hnsw/lib/interfaces/index.mjs
 create mode 100644 plugins/hnsw/lib/interfaces/index.mjs.map
 create mode 100644 plugins/hnsw/lib/retriever/index.d.mts
 create mode 100644 plugins/hnsw/lib/retriever/index.d.ts
 create mode 100644 plugins/hnsw/lib/retriever/index.js
 create mode 100644 plugins/hnsw/lib/retriever/index.js.map
 create mode 100644 plugins/hnsw/lib/retriever/index.mjs
 create mode 100644 plugins/hnsw/lib/retriever/index.mjs.map
 create mode 100644 plugins/hnsw/lib/types/index.d.mts
 create mode 100644 plugins/hnsw/lib/types/index.d.ts
 create mode 100644 plugins/hnsw/lib/types/index.js
 create mode 100644 plugins/hnsw/lib/types/index.js.map
 create mode 100644 plugins/hnsw/lib/types/index.mjs
 create mode 100644 plugins/hnsw/lib/types/index.mjs.map
 create mode 100644 plugins/hnsw/lib/utilities/index.d.mts
 create mode 100644 plugins/hnsw/lib/utilities/index.d.ts
 create mode 100644 plugins/hnsw/lib/utilities/index.js
 create mode 100644 plugins/hnsw/lib/utilities/index.js.map
 create mode 100644 plugins/hnsw/lib/utilities/index.mjs
 create mode 100644 plugins/hnsw/lib/utilities/index.mjs.map
 create mode 100644 plugins/openai/lib/chunk-WFI2LP4G.mjs
 create mode 100644 plugins/openai/lib/chunk-WFI2LP4G.mjs.map
 create mode 100644 plugins/openai/lib/dalle.d.mts
 create mode 100644 plugins/openai/lib/dalle.d.ts
 create mode 100644 plugins/openai/lib/dalle.js
 create mode 100644 plugins/openai/lib/dalle.js.map
 create mode 100644 plugins/openai/lib/dalle.mjs
 create mode 100644 plugins/openai/lib/dalle.mjs.map
 create mode 100644 plugins/openai/lib/embedder-DTnK2FJN.d.ts
 create mode 100644 plugins/openai/lib/embedder-DZYwphxr.d.mts
 create mode 100644 plugins/openai/lib/embedder.d.mts
 create mode 100644 plugins/openai/lib/embedder.d.ts
 create mode 100644 plugins/openai/lib/embedder.js
 create mode 100644 plugins/openai/lib/embedder.js.map
 create mode 100644 plugins/openai/lib/embedder.mjs
 create mode 100644 plugins/openai/lib/embedder.mjs.map
 create mode 100644 plugins/openai/lib/gpt.d.mts
 create mode 100644 plugins/openai/lib/gpt.d.ts
 create mode 100644 plugins/openai/lib/gpt.js
 create mode 100644 plugins/openai/lib/gpt.js.map
 create mode 100644 plugins/openai/lib/gpt.mjs
 create mode 100644 plugins/openai/lib/gpt.mjs.map
 create mode 100644 plugins/openai/lib/index.d.mts
 create mode 100644 plugins/openai/lib/index.d.ts
 create mode 100644 plugins/openai/lib/index.js
 create mode 100644 plugins/openai/lib/index.js.map
 create mode 100644 plugins/openai/lib/index.mjs
 create mode 100644 plugins/openai/lib/index.mjs.map
 create mode 100644 plugins/openai/lib/tts.d.mts
 create mode 100644 plugins/openai/lib/tts.d.ts
 create mode 100644 plugins/openai/lib/tts.js
 create mode 100644 plugins/openai/lib/tts.js.map
 create mode 100644 plugins/openai/lib/tts.mjs
 create mode 100644 plugins/openai/lib/tts.mjs.map
 create mode 100644 plugins/openai/lib/whisper.d.mts
 create mode 100644 plugins/openai/lib/whisper.d.ts
 create mode 100644 plugins/openai/lib/whisper.js
 create mode 100644 plugins/openai/lib/whisper.js.map
 create mode 100644 plugins/openai/lib/whisper.mjs
 create mode 100644 plugins/openai/lib/whisper.mjs.map

diff --git a/.gitignore b/.gitignore
index c0d3a37b..519cae4e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,6 @@
 node_modules/
 .nx/
-lib/
+# lib/
 .env
 .vscode/launch.json
 plugins/.DS_Store
diff --git a/examples/lib/genkit.config.js b/examples/lib/genkit.config.js
new file mode 100644
index 00000000..aed7f9d1
--- /dev/null
+++ b/examples/lib/genkit.config.js
@@ -0,0 +1,20 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+require("dotenv/config");
+const core_1 = require("@genkit-ai/core");
+const genkitx_groq_1 = __importDefault(require("genkitx-groq"));
+const genkitx_cohere_1 = __importDefault(require("genkitx-cohere"));
+const genkitx_anthropic_1 = __importDefault(require("genkitx-anthropic"));
+const genkitx_mistral_1 = __importDefault(require("genkitx-mistral"));
+const genkitx_openai_1 = __importDefault(require("genkitx-openai"));
+const dotprompt_1 = require("@genkit-ai/dotprompt");
+exports.default = (0, core_1.configureGenkit)({
+    plugins: [(0, genkitx_openai_1.default)(), (0, genkitx_groq_1.default)(), (0, genkitx_cohere_1.default)(), (0, genkitx_anthropic_1.default)(), (0, genkitx_mistral_1.default)(), (0, dotprompt_1.dotprompt)()],
+    logLevel: 'debug',
+    enableTracingAndMetrics: true,
+    promptDir: '../prompts',
+});
+//# sourceMappingURL=genkit.config.js.map
\ No newline at end of file
diff --git a/examples/lib/genkit.config.js.map b/examples/lib/genkit.config.js.map
new file mode 100644
index 00000000..de3a2193
--- /dev/null
+++ b/examples/lib/genkit.config.js.map
@@ -0,0 +1 @@
+{"version":3,"file":"genkit.config.js","sourceRoot":"","sources":["../src/genkit.config.ts"],"names":[],"mappings":";;;;;AAAA,yBAAuB;AAEvB,0CAAkD;AAClD,gEAAgC;AAChC,oEAAoC;AACpC,0EAA0C;AAC1C,sEAAsC;AACtC,oEAAoC;AACpC,oDAAiD;AAEjD,kBAAe,IAAA,sBAAe,EAAC;IAC7B,OAAO,EAAE,CAAC,IAAA,wBAAM,GAAE,EAAE,IAAA,sBAAI,GAAE,EAAE,IAAA,wBAAM,GAAE,EAAE,IAAA,2BAAS,GAAE,EAAE,IAAA,yBAAO,GAAE,EAAE,IAAA,qBAAS,GAAE,CAAC;IAC1E,QAAQ,EAAE,OAAO;IACjB,uBAAuB,EAAE,IAAI;IAC7B,SAAS,EAAE,YAAY;CACxB,CAAC,CAAC"}
\ No newline at end of file
diff --git a/examples/lib/index.js b/examples/lib/index.js
new file mode 100644
index 00000000..01046bdf
--- /dev/null
+++ b/examples/lib/index.js
@@ -0,0 +1,127 @@
+"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || function (mod) {
+    if (mod && mod.__esModule) return mod;
+    var result = {};
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    __setModuleDefault(result, mod);
+    return result;
+};
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.myFlow = void 0;
+require("dotenv/config");
+const dotprompt_1 = require("@genkit-ai/dotprompt");
+const ai_1 = require("@genkit-ai/ai");
+const flow_1 = require("@genkit-ai/flow");
+const z = __importStar(require("zod"));
+const genkitx_openai_1 = require("genkitx-openai");
+const core_1 = require("@genkit-ai/core");
+const genkit_config_1 = __importDefault(require("./genkit.config"));
+(0, core_1.initializeGenkit)(genkit_config_1.default);
+// Define standard prompts
+const helloPrompt = (0, ai_1.definePrompt)({
+    name: 'helloPrompt',
+    inputSchema: z.object({ name: z.string() }),
+}, async (input) => {
+    const promptText = `You are a helpful AI assistant named Walt.
+    Say hello to ${input.name}.`;
+    return {
+        messages: [{ role: 'user', content: [{ text: promptText }] }],
+        config: { temperature: 0.3 },
+    };
+});
+// Tool definition
+const tool = (0, ai_1.defineTool)({
+    name: 'myJoke',
+    description: 'useful when you need a joke to tell.',
+    inputSchema: z.string(),
+    outputSchema: z.string(),
+}, async (input) => `haha Just kidding no joke about for you! got you`);
+// define Dotprompts
+// export const greetingPrompt = prompt('basic');
+// const multimodalPrompt = prompt('multimodalInput');
+// const structuredOutputPrompt = prompt('structuredInputOutput');
+// const customConfigPrompt = prompt('customConfig');
+// Define a Dotprompt in code
+const codeDotPrompt = (0, dotprompt_1.defineDotprompt)({
+    name: 'exampleDotPrompt',
+    model: genkitx_openai_1.gpt4o,
+    input: {
+        schema: z.object({
+            object_name: z.string(),
+            image_url: z.string(),
+        }),
+    },
+    output: {
+        schema: z.object({
+            exist: z.boolean().describe('Whether the object exists in the image'),
+            color: z.string().describe('The color of the object'),
+            details: z.string().describe('Details about the object'),
+        }),
+    },
+    config: {
+        temperature: 1.0,
+        topP: 0.9,
+        maxOutputTokens: 100,
+        topK: 20,
+        stopSequences: ['abc'],
+        visualDetailLevel: 'high',
+    },
+}, `Does the object {{object_name}} exist in the given image {{media url=image_url}}? If it does, what color is it and what are some details about it?`);
+// Define flows
+exports.myFlow = (0, flow_1.defineFlow)({
+    name: 'menuSuggestionFlow',
+    inputSchema: z.string(),
+    outputSchema: z.string(),
+}, async (subject) => {
+    const llmResponse = await (0, ai_1.generate)({
+        prompt: `Suggest an item for the menu of a ${subject} themed restaurant`,
+        model: genkitx_openai_1.gpt4o,
+    });
+    return llmResponse.text();
+});
+(0, flow_1.startFlowsServer)();
+// Tool use
+// const createReminder = defineTool(
+//   {
+//     name: 'createReminder',
+//     description: 'Use this to create reminders for things in the future',
+//     inputSchema: z.object({
+//       time: z
+//         .string()
+//         .describe('ISO timestamp string, e.g. 2024-04-03T12:23:00Z'),
+//       reminder: z.string().describe('the content of the reminder'),
+//     }),
+//     outputSchema: z.number().describe('the ID of the created reminder'),
+//   },
+//   (reminder) => Promise.resolve(3)
+// );
+// const result = generate({
+//   model: llama3x70b,
+//   tools: [createReminder],
+//   prompt: `
+//   You are a reminder assistant.
+//   If you create a reminder, describe in text the reminder you created as a response.
+//   Query: I have a meeting with Anna at 3 for dinner - can you set a reminder for the time?
+//   `,
+// });
+// console.log(result.then((res) => res.text()));
+//# sourceMappingURL=index.js.map
\ No newline at end of file
diff --git a/examples/lib/index.js.map b/examples/lib/index.js.map
new file mode 100644
index 00000000..0f9d66d6
--- /dev/null
+++ b/examples/lib/index.js.map
@@ -0,0 +1 @@
+{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA,yBAAuB;AAEvB,oDAA+D;AAC/D,sCAAmE;AAEnE,0CAA+D;AAC/D,uCAAyB;AAEzB,mDAAuC;AACvC,0CAAmD;AACnD,oEAAqC;AAGrC,IAAA,uBAAgB,EAAC,uBAAM,CAAC,CAAC;AAEzB,0BAA0B;AAC1B,MAAM,WAAW,GAAG,IAAA,iBAAY,EAC9B;IACE,IAAI,EAAE,aAAa;IACnB,WAAW,EAAE,CAAC,CAAC,MAAM,CAAC,EAAE,IAAI,EAAE,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC;CAC5C,EACD,KAAK,EAAE,KAAK,EAAE,EAAE;IACd,MAAM,UAAU,GAAG;mBACJ,KAAK,CAAC,IAAI,GAAG,CAAC;IAE7B,OAAO;QACL,QAAQ,EAAE,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,OAAO,EAAE,CAAC,EAAE,IAAI,EAAE,UAAU,EAAE,CAAC,EAAE,CAAC;QAC7D,MAAM,EAAE,EAAE,WAAW,EAAE,GAAG,EAAE;KAC7B,CAAC;AACJ,CAAC,CACF,CAAC;AAEF,kBAAkB;AAClB,MAAM,IAAI,GAAG,IAAA,eAAU,EACrB;IACE,IAAI,EAAE,QAAQ;IACd,WAAW,EAAE,sCAAsC;IACnD,WAAW,EAAE,CAAC,CAAC,MAAM,EAAE;IACvB,YAAY,EAAE,CAAC,CAAC,MAAM,EAAE;CACzB,EACD,KAAK,EAAE,KAAK,EAAE,EAAE,CAAC,kDAAkD,CACpE,CAAC;AAEF,oBAAoB;AACpB,iDAAiD;AACjD,sDAAsD;AACtD,kEAAkE;AAClE,qDAAqD;AAErD,6BAA6B;AAC7B,MAAM,aAAa,GAAG,IAAA,2BAAe,EACnC;IACE,IAAI,EAAE,kBAAkB;IACxB,KAAK,EAAE,sBAAK;IACZ,KAAK,EAAE;QACL,MAAM,EAAE,CAAC,CAAC,MAAM,CAAC;YACf,WAAW,EAAE,CAAC,CAAC,MAAM,EAAE;YACvB,SAAS,EAAE,CAAC,CAAC,MAAM,EAAE;SACtB,CAAC;KACH;IACD,MAAM,EAAE;QACN,MAAM,EAAE,CAAC,CAAC,MAAM,CAAC;YACf,KAAK,EAAE,CAAC,CAAC,OAAO,EAAE,CAAC,QAAQ,CAAC,wCAAwC,CAAC;YACrE,KAAK,EAAE,CAAC,CAAC,MAAM,EAAE,CAAC,QAAQ,CAAC,yBAAyB,CAAC;YACrD,OAAO,EAAE,CAAC,CAAC,MAAM,EAAE,CAAC,QAAQ,CAAC,0BAA0B,CAAC;SACzD,CAAC;KACH;IACD,MAAM,EAAE;QACN,WAAW,EAAE,GAAG;QAChB,IAAI,EAAE,GAAG;QACT,eAAe,EAAE,GAAG;QACpB,IAAI,EAAE,EAAE;QACR,aAAa,EAAE,CAAC,KAAK,CAAC;QACtB,iBAAiB,EAAE,MAAM;KAC1B;CACF,EACD,oJAAoJ,CACrJ,CAAC;AAEF,eAAe;AACF,QAAA,MAAM,GAAG,IAAA,iBAAU,EAC9B;IACE,IAAI,EAAE,oBAAoB;IAC1B,WAAW,EAAE,CAAC,CAAC,MAAM,EAAE;IACvB,YAAY,EAAE,CAAC,CAAC,MAAM,EAAE;CACzB,EACD,KAAK,EAAE,OAAO,EAAE,EAAE;IAChB,MAAM,WAAW,GAAG,MAAM,IAAA,aAAQ,EAAC;QACjC,MAAM,EAAE,qCAAqC,OAAO,oBAAoB;QACxE,KAAK,EAAE,sBAAK;KACb,CAAC,CAAC;IAEH,OAAO,WAAW,CAAC,IAAI,EAAE,CAAC;AAC5B,CAAC,CACF,CAAC;AACF,IAAA,uBAAgB,GAAE,CAAC;AAEnB,WAAW;AACX,qCAAqC;AACrC,MAAM;AACN,8BAA8B;AAC9B,4EAA4E;AAC5E,8BAA8B;AAC9B,gBAAgB;AAChB,oBAAoB;AACpB,wEAAwE;AACxE,sEAAsE;AACtE,UAAU;AACV,2EAA2E;AAC3E,OAAO;AACP,qCAAqC;AACrC,KAAK;AAEL,4BAA4B;AAC5B,uBAAuB;AACvB,6BAA6B;AAC7B,cAAc;AACd,kCAAkC;AAClC,uFAAuF;AAEvF,6FAA6F;AAC7F,OAAO;AACP,MAAM;AAEN,iDAAiD"}
\ No newline at end of file
diff --git a/package-lock.json b/package-lock.json
index 8bdfc59d..8cc2ab65 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -13013,7 +13013,6 @@
       "version": "0.1.13",
       "resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.13.tgz",
       "integrity": "sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==",
"dev": true, "optional": true, "dependencies": { "iconv-lite": "^0.6.2" @@ -13023,7 +13022,6 @@ "version": "0.6.3", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "dev": true, "optional": true, "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" @@ -29670,7 +29668,6 @@ "version": "5.4.5", "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.5.tgz", "integrity": "sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==", - "dev": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" diff --git a/plugins/convex/lib/index.d.mts b/plugins/convex/lib/index.d.mts new file mode 100644 index 00000000..c33dbd34 --- /dev/null +++ b/plugins/convex/lib/index.d.mts @@ -0,0 +1,28 @@ +import { EmbedderArgument } from '@genkit-ai/ai/embedder'; +import { PluginProvider } from '@genkit-ai/core'; +import * as z from 'zod'; + +/** + * Copyright 2024 The Fire Company + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +interface Params { + indexName: string; + embedder: EmbedderArgument; + embedderOptions?: z.infer; +} +declare function convexVectorstore(params: Params[]): PluginProvider; + +export { convexVectorstore, convexVectorstore as default }; diff --git a/plugins/convex/lib/index.d.ts b/plugins/convex/lib/index.d.ts new file mode 100644 index 00000000..c33dbd34 --- /dev/null +++ b/plugins/convex/lib/index.d.ts @@ -0,0 +1,28 @@ +import { EmbedderArgument } from '@genkit-ai/ai/embedder'; +import { PluginProvider } from '@genkit-ai/core'; +import * as z from 'zod'; + +/** + * Copyright 2024 The Fire Company + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+interface Params {
+    indexName: string;
+    embedder: EmbedderArgument;
+    embedderOptions?: z.infer;
+}
+declare function convexVectorstore(params: Params[]): PluginProvider;
+
+export { convexVectorstore, convexVectorstore as default };
diff --git a/plugins/convex/lib/index.js b/plugins/convex/lib/index.js
new file mode 100644
index 00000000..8048552d
--- /dev/null
+++ b/plugins/convex/lib/index.js
@@ -0,0 +1,62 @@
+"use strict";
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+var __async = (__this, __arguments, generator) => {
+  return new Promise((resolve, reject) => {
+    var fulfilled = (value) => {
+      try {
+        step(generator.next(value));
+      } catch (e) {
+        reject(e);
+      }
+    };
+    var rejected = (value) => {
+      try {
+        step(generator.throw(value));
+      } catch (e) {
+        reject(e);
+      }
+    };
+    var step = (x) => x.done ? resolve(x.value) : Promise.resolve(x.value).then(fulfilled, rejected);
+    step((generator = generator.apply(__this, __arguments)).next());
+  });
+};
+var src_exports = {};
+__export(src_exports, {
+  convexVectorstore: () => convexVectorstore,
+  default: () => src_default
+});
+module.exports = __toCommonJS(src_exports);
+var import_core = require("@genkit-ai/core");
+function convexVectorstore(params) {
+  const plugin = (0, import_core.genkitPlugin)(
+    "convexVectorstore",
+    (params2) => __async(this, null, function* () {
+      return {
+        // retrievers: params.map((p) => configureDevLocalRetriever(p)),
+      };
+    })
+  );
+  return plugin(params);
+}
+var src_default = convexVectorstore;
+// Annotate the CommonJS export names for ESM import in node:
+0 && (module.exports = {
+  convexVectorstore
+});
+//# sourceMappingURL=index.js.map
\ No newline at end of file
diff --git a/plugins/convex/lib/index.js.map b/plugins/convex/lib/index.js.map
new file mode 100644
index 00000000..e1200a9a
--- /dev/null
+++ b/plugins/convex/lib/index.js.map
@@ -0,0 +1 @@
+{"version":3,"sources":["../src/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { EmbedderArgument } from '@genkit-ai/ai/embedder';\n\nimport { genkitPlugin, PluginProvider } from '@genkit-ai/core';\nimport * as z from 'zod';\n\ninterface Params {\n  indexName: string;\n  embedder: EmbedderArgument;\n  embedderOptions?: z.infer;\n}\n\nexport function convexVectorstore(\n  params: Params[]\n): PluginProvider {\n  const plugin = genkitPlugin(\n    'convexVectorstore',\n    async (params: Params[]) => ({\n      // retrievers: params.map((p) => configureDevLocalRetriever(p)),\n    })\n  );\n  return plugin(params);\n}\n\nexport default convexVectorstore;\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAkBA,kBAA6C;AAStC,SAAS,kBACd,QACgB;AAChB,QAAM,aAAS;AAAA,IACb;AAAA,IACA,CAAOA,YAAyC;AAAI;AAAA;AAAA,MAEpD;AAAA;AAAA,EACF;AACA,SAAO,OAAO,MAAM;AACtB;AAEA,IAAO,cAAQ;","names":["params"]}
\ No newline at end of file
diff --git a/plugins/convex/lib/index.mjs b/plugins/convex/lib/index.mjs
new file mode 100644
index 00000000..048045a5
--- /dev/null
+++ b/plugins/convex/lib/index.mjs
@@ -0,0 +1,38 @@
+var __async = (__this, __arguments, generator) => {
+  return new Promise((resolve, reject) => {
+    var fulfilled = (value) => {
+      try {
+        step(generator.next(value));
+      } catch (e) {
+        reject(e);
+      }
+    };
+    var rejected = (value) => {
+      try {
+        step(generator.throw(value));
+      } catch (e) {
+        reject(e);
+      }
+    };
+    var step = (x) => x.done ? resolve(x.value) : Promise.resolve(x.value).then(fulfilled, rejected);
+    step((generator = generator.apply(__this, __arguments)).next());
+  });
+};
+import { genkitPlugin } from "@genkit-ai/core";
+function convexVectorstore(params) {
+  const plugin = genkitPlugin(
+    "convexVectorstore",
+    (params2) => __async(this, null, function* () {
+      return {
+        // retrievers: params.map((p) => configureDevLocalRetriever(p)),
+      };
+    })
+  );
+  return plugin(params);
+}
+var src_default = convexVectorstore;
+export {
+  convexVectorstore,
+  src_default as default
+};
+//# sourceMappingURL=index.mjs.map
\ No newline at end of file
diff --git a/plugins/convex/lib/index.mjs.map b/plugins/convex/lib/index.mjs.map
new file mode 100644
index 00000000..6e5cd28d
--- /dev/null
+++ b/plugins/convex/lib/index.mjs.map
@@ -0,0 +1 @@
+{"version":3,"sources":["../src/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { EmbedderArgument } from '@genkit-ai/ai/embedder';\n\nimport { genkitPlugin, PluginProvider } from '@genkit-ai/core';\nimport * as z from 'zod';\n\ninterface Params {\n  indexName: string;\n  embedder: EmbedderArgument;\n  embedderOptions?: z.infer;\n}\n\nexport function convexVectorstore(\n  params: Params[]\n): PluginProvider {\n  const plugin = genkitPlugin(\n    'convexVectorstore',\n    async (params: Params[]) => ({\n      // retrievers: params.map((p) => configureDevLocalRetriever(p)),\n    })\n  );\n  return plugin(params);\n}\n\nexport default convexVectorstore;\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAkBA,SAAS,oBAAoC;AAStC,SAAS,kBACd,QACgB;AAChB,QAAM,SAAS;AAAA,IACb;AAAA,IACA,CAAOA,YAAyC;AAAI;AAAA;AAAA,MAEpD;AAAA;AAAA,EACF;AACA,SAAO,OAAO,MAAM;AACtB;AAEA,IAAO,cAAQ;","names":["params"]}
\ No newline at end of file
diff --git a/plugins/hnsw/lib/actions/index.d.mts b/plugins/hnsw/lib/actions/index.d.mts
new file mode 100644
index 00000000..c3f151d8
--- /dev/null
+++ b/plugins/hnsw/lib/actions/index.d.mts
@@ -0,0 +1,22 @@
+import { IndexerFlowOptions, PluginOptions, RetrieverFlowOptions } from '../interfaces/index.mjs';
+
+/**
+ * Copyright 2024 The Fire Company
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+declare const hnswIndexerAction: (flowOptions: IndexerFlowOptions, pluginOptions: PluginOptions) => Promise;
+declare const hnswRetrieverAction: (flowOptions: RetrieverFlowOptions, pluginOptions: PluginOptions) => Promise;
+
+export { hnswIndexerAction, hnswRetrieverAction };
diff --git a/plugins/hnsw/lib/actions/index.d.ts b/plugins/hnsw/lib/actions/index.d.ts
new file mode 100644
index 00000000..22243545
--- /dev/null
+++ b/plugins/hnsw/lib/actions/index.d.ts
@@ -0,0 +1,22 @@
+import { IndexerFlowOptions, PluginOptions, RetrieverFlowOptions } from '../interfaces/index.js';
+
+/**
+ * Copyright 2024 The Fire Company
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+declare const hnswIndexerAction: (flowOptions: IndexerFlowOptions, pluginOptions: PluginOptions) => Promise;
+declare const hnswRetrieverAction: (flowOptions: RetrieverFlowOptions, pluginOptions: PluginOptions) => Promise;
+
+export { hnswIndexerAction, hnswRetrieverAction };
diff --git a/plugins/hnsw/lib/actions/index.js b/plugins/hnsw/lib/actions/index.js
new file mode 100644
index 00000000..b8ab72f0
--- /dev/null
+++ b/plugins/hnsw/lib/actions/index.js
@@ -0,0 +1,46 @@
+"use strict";
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+var actions_exports = {};
+__export(actions_exports, {
+  hnswIndexerAction: () => hnswIndexerAction,
+  hnswRetrieverAction: () => hnswRetrieverAction
+});
+module.exports = __toCommonJS(actions_exports);
+var import_indexer = require("./../indexer");
+var import_retriever = require("./../retriever");
+const hnswIndexerAction = async (flowOptions, pluginOptions) => {
+  try {
+    return await (0, import_indexer.saveVectorIndexer)(flowOptions, pluginOptions);
+  } catch (error) {
+    return `Vector index saving error, ${error}`;
+  }
+};
+const hnswRetrieverAction = async (flowOptions, pluginOptions) => {
+  try {
+    return await (0, import_retriever.retrieveResponseWithVector)(flowOptions, pluginOptions);
+  } catch (error) {
+    return `Error generating prompt response, ${error}`;
+  }
+};
+// Annotate the CommonJS export names for ESM import in node:
+0 && (module.exports = {
+  hnswIndexerAction,
+  hnswRetrieverAction
+});
+//# sourceMappingURL=index.js.map
\ No newline at end of file
diff --git a/plugins/hnsw/lib/actions/index.js.map b/plugins/hnsw/lib/actions/index.js.map
new file mode 100644
index 00000000..b4ca6004
--- /dev/null
+++ b/plugins/hnsw/lib/actions/index.js.map
@@ -0,0 +1 @@
+{"version":3,"sources":["../../src/actions/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { saveVectorIndexer } from './../indexer';\nimport { retrieveResponseWithVector } from './../retriever';\nimport {\n  RetrieverFlowOptions,\n  PluginOptions,\n  IndexerFlowOptions,\n} from './../interfaces';\n\nexport const hnswIndexerAction = async (\n  flowOptions: IndexerFlowOptions,\n  pluginOptions: PluginOptions\n) => {\n  try {\n    return await saveVectorIndexer(flowOptions, pluginOptions);\n  } catch (error) {\n    return `Vector index saving error, ${error}`;\n  }\n};\n\nexport const hnswRetrieverAction = async (\n  flowOptions: RetrieverFlowOptions,\n  pluginOptions: PluginOptions\n) => {\n  try {\n    return await retrieveResponseWithVector(flowOptions, pluginOptions);\n  } catch (error) {\n    return `Error generating prompt response, ${error}`;\n  }\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAgBA,qBAAkC;AAClC,uBAA2C;AAOpC,MAAM,oBAAoB,OAC/B,aACA,kBACG;AACH,MAAI;AACF,WAAO,UAAM,kCAAkB,aAAa,aAAa;AAAA,EAC3D,SAAS,OAAO;AACd,WAAO,8BAA8B,KAAK;AAAA,EAC5C;AACF;AAEO,MAAM,sBAAsB,OACjC,aACA,kBACG;AACH,MAAI;AACF,WAAO,UAAM,6CAA2B,aAAa,aAAa;AAAA,EACpE,SAAS,OAAO;AACd,WAAO,qCAAqC,KAAK;AAAA,EACnD;AACF;","names":[]}
\ No newline at end of file
diff --git a/plugins/hnsw/lib/actions/index.mjs b/plugins/hnsw/lib/actions/index.mjs
new file mode 100644
index 00000000..83d5fff9
--- /dev/null
+++ b/plugins/hnsw/lib/actions/index.mjs
@@ -0,0 +1,21 @@
+import { saveVectorIndexer } from "./../indexer";
+import { retrieveResponseWithVector } from "./../retriever";
+const hnswIndexerAction = async (flowOptions, pluginOptions) => {
+  try {
+    return await saveVectorIndexer(flowOptions, pluginOptions);
+  } catch (error) {
+    return `Vector index saving error, ${error}`;
+  }
+};
+const hnswRetrieverAction = async (flowOptions, pluginOptions) => {
+  try {
+    return await retrieveResponseWithVector(flowOptions, pluginOptions);
+  } catch (error) {
+    return `Error generating prompt response, ${error}`;
+  }
+};
+export {
+  hnswIndexerAction,
+  hnswRetrieverAction
+};
+//# sourceMappingURL=index.mjs.map
\ No newline at end of file
diff --git a/plugins/hnsw/lib/actions/index.mjs.map b/plugins/hnsw/lib/actions/index.mjs.map
new file mode 100644
index 00000000..24a7f4d3
--- /dev/null
+++ b/plugins/hnsw/lib/actions/index.mjs.map
@@ -0,0 +1 @@
+{"version":3,"sources":["../../src/actions/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { saveVectorIndexer } from './../indexer';\nimport { retrieveResponseWithVector } from './../retriever';\nimport {\n  RetrieverFlowOptions,\n  PluginOptions,\n  IndexerFlowOptions,\n} from './../interfaces';\n\nexport const hnswIndexerAction = async (\n  flowOptions: IndexerFlowOptions,\n  pluginOptions: PluginOptions\n) => {\n  try {\n    return await saveVectorIndexer(flowOptions, pluginOptions);\n  } catch (error) {\n    return `Vector index saving error, ${error}`;\n  }\n};\n\nexport const hnswRetrieverAction = async (\n  flowOptions: RetrieverFlowOptions,\n  pluginOptions: PluginOptions\n) => {\n  try {\n    return await retrieveResponseWithVector(flowOptions, pluginOptions);\n  } catch (error) {\n    return `Error generating prompt response, ${error}`;\n  }\n};\n"],"mappings":"AAgBA,SAAS,yBAAyB;AAClC,SAAS,kCAAkC;AAOpC,MAAM,oBAAoB,OAC/B,aACA,kBACG;AACH,MAAI;AACF,WAAO,MAAM,kBAAkB,aAAa,aAAa;AAAA,EAC3D,SAAS,OAAO;AACd,WAAO,8BAA8B,KAAK;AAAA,EAC5C;AACF;AAEO,MAAM,sBAAsB,OACjC,aACA,kBACG;AACH,MAAI;AACF,WAAO,MAAM,2BAA2B,aAAa,aAAa;AAAA,EACpE,SAAS,OAAO;AACd,WAAO,qCAAqC,KAAK;AAAA,EACnD;AACF;","names":[]}
\ No newline at end of file
diff --git a/plugins/hnsw/lib/config/index.d.mts b/plugins/hnsw/lib/config/index.d.mts
new file mode 100644
index 00000000..a88edfdb
--- /dev/null
+++ b/plugins/hnsw/lib/config/index.d.mts
@@ -0,0 +1,48 @@
+import * as z from 'zod';
+
+/**
+ * Copyright 2024 The Fire Company
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+declare const indexerFlowConfig: {
+    name: string;
+    inputSchema: z.ZodObject<{
+        dataPath: z.ZodString;
+        indexOutputPath: z.ZodString;
+    }, "strip", z.ZodTypeAny, {
+        indexOutputPath: string;
+        dataPath: string;
+    }, {
+        indexOutputPath: string;
+        dataPath: string;
+    }>;
+    outputSchema: z.ZodString;
+};
+declare const retrieverflowConfig: {
+    name: string;
+    inputSchema: z.ZodObject<{
+        prompt: z.ZodString;
+        indexPath: z.ZodString;
+    }, "strip", z.ZodTypeAny, {
+        prompt: string;
+        indexPath: string;
+    }, {
+        prompt: string;
+        indexPath: string;
+    }>;
+    outputSchema: z.ZodString;
+};
+
+export { indexerFlowConfig, retrieverflowConfig };
diff --git a/plugins/hnsw/lib/config/index.d.ts b/plugins/hnsw/lib/config/index.d.ts
new file mode 100644
index 00000000..a88edfdb
--- /dev/null
+++ b/plugins/hnsw/lib/config/index.d.ts
@@ -0,0 +1,48 @@
+import * as z from 'zod';
+
+/**
+ * Copyright 2024 The Fire Company
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+declare const indexerFlowConfig: {
+    name: string;
+    inputSchema: z.ZodObject<{
+        dataPath: z.ZodString;
+        indexOutputPath: z.ZodString;
+    }, "strip", z.ZodTypeAny, {
+        indexOutputPath: string;
+        dataPath: string;
+    }, {
+        indexOutputPath: string;
+        dataPath: string;
+    }>;
+    outputSchema: z.ZodString;
+};
+declare const retrieverflowConfig: {
+    name: string;
+    inputSchema: z.ZodObject<{
+        prompt: z.ZodString;
+        indexPath: z.ZodString;
+    }, "strip", z.ZodTypeAny, {
+        prompt: string;
+        indexPath: string;
+    }, {
+        prompt: string;
+        indexPath: string;
+    }>;
+    outputSchema: z.ZodString;
+};
+
+export { indexerFlowConfig, retrieverflowConfig };
diff --git a/plugins/hnsw/lib/config/index.js b/plugins/hnsw/lib/config/index.js
new file mode 100644
index 00000000..796bbf00
--- /dev/null
+++ b/plugins/hnsw/lib/config/index.js
@@ -0,0 +1,58 @@
+"use strict";
+var __create = Object.create;
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __getProtoOf = Object.getPrototypeOf;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+  // If the importer is in node compatibility mode or this is not an ESM
+  // file that has been converted to a CommonJS file using a Babel-
+  // compatible transform (i.e. "__esModule" has not been set), then set
+  // "default" to the CommonJS "module.exports" for node compatibility.
+  isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+  mod
+));
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+var config_exports = {};
+__export(config_exports, {
+  indexerFlowConfig: () => indexerFlowConfig,
+  retrieverflowConfig: () => retrieverflowConfig
+});
+module.exports = __toCommonJS(config_exports);
+var z = __toESM(require("zod"));
+var import_constants = require("./../constants");
+const indexerFlowConfig = {
+  name: import_constants.FLOW_NAME_INDEXER,
+  inputSchema: z.object({
+    dataPath: z.string().describe(import_constants.SCHEMA_TRAINABLE_PATH),
+    indexOutputPath: z.string().describe(import_constants.SCHEMA_INDEX_OUTPUT_PATH)
+  }),
+  outputSchema: z.string().describe(import_constants.SCHEMA_RESULT)
+};
+const retrieverflowConfig = {
+  name: import_constants.FLOW_NAME_RETRIEVER,
+  inputSchema: z.object({
+    prompt: z.string().describe(import_constants.SCHEMA_PROMPT),
+    indexPath: z.string().describe(import_constants.SCHEMA_INDEX_PATH)
+  }),
+  outputSchema: z.string().describe(import_constants.SCHEMA_RESULT)
+};
+// Annotate the CommonJS export names for ESM import in node:
+0 && (module.exports = {
+  indexerFlowConfig,
+  retrieverflowConfig
+});
+//# sourceMappingURL=index.js.map
\ No newline at end of file
diff --git a/plugins/hnsw/lib/config/index.js.map b/plugins/hnsw/lib/config/index.js.map
new file mode 100644
index 00000000..c0cb9f8f
--- /dev/null
+++ b/plugins/hnsw/lib/config/index.js.map
@@ -0,0 +1 @@
+{"version":3,"sources":["../../src/config/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport * as z from 'zod';\n\nimport {\n  FLOW_NAME_INDEXER,\n  FLOW_NAME_RETRIEVER,\n  SCHEMA_PROMPT,\n  SCHEMA_INDEX_PATH,\n  SCHEMA_RESULT,\n  SCHEMA_TRAINABLE_PATH,\n  SCHEMA_INDEX_OUTPUT_PATH,\n} from './../constants';\n\nexport const indexerFlowConfig = {\n  name: FLOW_NAME_INDEXER,\n  inputSchema: z.object({\n    dataPath: z.string().describe(SCHEMA_TRAINABLE_PATH),\n    indexOutputPath: z.string().describe(SCHEMA_INDEX_OUTPUT_PATH),\n  }),\n  outputSchema: z.string().describe(SCHEMA_RESULT),\n};\n\nexport const retrieverflowConfig = {\n  name: FLOW_NAME_RETRIEVER,\n  inputSchema: z.object({\n    prompt: z.string().describe(SCHEMA_PROMPT),\n    indexPath: z.string().describe(SCHEMA_INDEX_PATH),\n  }),\n  outputSchema: z.string().describe(SCHEMA_RESULT),\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAgBA,QAAmB;AAEnB,uBAQO;AAEA,MAAM,oBAAoB;AAAA,EAC/B,MAAM;AAAA,EACN,aAAa,EAAE,OAAO;AAAA,IACpB,UAAU,EAAE,OAAO,EAAE,SAAS,sCAAqB;AAAA,IACnD,iBAAiB,EAAE,OAAO,EAAE,SAAS,yCAAwB;AAAA,EAC/D,CAAC;AAAA,EACD,cAAc,EAAE,OAAO,EAAE,SAAS,8BAAa;AACjD;AAEO,MAAM,sBAAsB;AAAA,EACjC,MAAM;AAAA,EACN,aAAa,EAAE,OAAO;AAAA,IACpB,QAAQ,EAAE,OAAO,EAAE,SAAS,8BAAa;AAAA,IACzC,WAAW,EAAE,OAAO,EAAE,SAAS,kCAAiB;AAAA,EAClD,CAAC;AAAA,EACD,cAAc,EAAE,OAAO,EAAE,SAAS,8BAAa;AACjD;","names":[]}
\ No newline at end of file
diff --git a/plugins/hnsw/lib/config/index.mjs b/plugins/hnsw/lib/config/index.mjs
new file mode 100644
index 00000000..fd9b55ab
--- /dev/null
+++ b/plugins/hnsw/lib/config/index.mjs
@@ -0,0 +1,31 @@
+import * as z from "zod";
+import {
+  FLOW_NAME_INDEXER,
+  FLOW_NAME_RETRIEVER,
+  SCHEMA_PROMPT,
+  SCHEMA_INDEX_PATH,
+  SCHEMA_RESULT,
+  SCHEMA_TRAINABLE_PATH,
+  SCHEMA_INDEX_OUTPUT_PATH
+} from "./../constants";
+const indexerFlowConfig = {
+  name: FLOW_NAME_INDEXER,
+  inputSchema: z.object({
+    dataPath: z.string().describe(SCHEMA_TRAINABLE_PATH),
+    indexOutputPath: z.string().describe(SCHEMA_INDEX_OUTPUT_PATH)
+  }),
+  outputSchema: z.string().describe(SCHEMA_RESULT)
+};
+const retrieverflowConfig = {
+  name: FLOW_NAME_RETRIEVER,
+  inputSchema: z.object({
+    prompt: z.string().describe(SCHEMA_PROMPT),
+    indexPath: z.string().describe(SCHEMA_INDEX_PATH)
+  }),
+  outputSchema: z.string().describe(SCHEMA_RESULT)
+};
+export {
+  indexerFlowConfig,
+  retrieverflowConfig
+};
+//# sourceMappingURL=index.mjs.map
\ No newline at end of file
diff --git a/plugins/hnsw/lib/config/index.mjs.map b/plugins/hnsw/lib/config/index.mjs.map
new file mode 100644
index 00000000..75563cfd
--- /dev/null
+++ b/plugins/hnsw/lib/config/index.mjs.map
@@ -0,0 +1 @@
+{"version":3,"sources":["../../src/config/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport * as z from 'zod';\n\nimport {\n  FLOW_NAME_INDEXER,\n  FLOW_NAME_RETRIEVER,\n  SCHEMA_PROMPT,\n  SCHEMA_INDEX_PATH,\n  SCHEMA_RESULT,\n  SCHEMA_TRAINABLE_PATH,\n  SCHEMA_INDEX_OUTPUT_PATH,\n} from './../constants';\n\nexport const indexerFlowConfig = {\n  name: FLOW_NAME_INDEXER,\n  inputSchema: z.object({\n    dataPath: z.string().describe(SCHEMA_TRAINABLE_PATH),\n    indexOutputPath: z.string().describe(SCHEMA_INDEX_OUTPUT_PATH),\n  }),\n  outputSchema: z.string().describe(SCHEMA_RESULT),\n};\n\nexport const retrieverflowConfig = {\n  name: FLOW_NAME_RETRIEVER,\n  inputSchema: z.object({\n    prompt: z.string().describe(SCHEMA_PROMPT),\n    indexPath: z.string().describe(SCHEMA_INDEX_PATH),\n  }),\n  outputSchema: z.string().describe(SCHEMA_RESULT),\n};\n"],"mappings":"AAgBA,YAAY,OAAO;AAEnB;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AAEA,MAAM,oBAAoB;AAAA,EAC/B,MAAM;AAAA,EACN,aAAa,EAAE,OAAO;AAAA,IACpB,UAAU,EAAE,OAAO,EAAE,SAAS,qBAAqB;AAAA,IACnD,iBAAiB,EAAE,OAAO,EAAE,SAAS,wBAAwB;AAAA,EAC/D,CAAC;AAAA,EACD,cAAc,EAAE,OAAO,EAAE,SAAS,aAAa;AACjD;AAEO,MAAM,sBAAsB;AAAA,EACjC,MAAM;AAAA,EACN,aAAa,EAAE,OAAO;AAAA,IACpB,QAAQ,EAAE,OAAO,EAAE,SAAS,aAAa;AAAA,IACzC,WAAW,EAAE,OAAO,EAAE,SAAS,iBAAiB;AAAA,EAClD,CAAC;AAAA,EACD,cAAc,EAAE,OAAO,EAAE,SAAS,aAAa;AACjD;","names":[]}
\ No newline at end of file
diff --git a/plugins/hnsw/lib/constants/index.d.mts b/plugins/hnsw/lib/constants/index.d.mts
new file mode 100644
index 00000000..c291f128
--- /dev/null
+++ b/plugins/hnsw/lib/constants/index.d.mts
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2024 The Fire Company
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+declare const PLUGIN_NAME_INDEXER = "HNSW Indexer";
+declare const PLUGIN_NAME_RETRIEVER = "HNSW Retriever";
+declare const FLOW_NAME_INDEXER = "HNSW Indexer";
+declare const FLOW_NAME_RETRIEVER = "HNSW Retriever";
+declare const ERROR_NO_API_KEY = "Must supply either `options.apiKey` or set `GOOGLE_API_KEY` environment variable.";
+declare const ERROR_INVALID_ARGUMENT = "INVALID_ARGUMENT";
+declare const SCHEMA_PROMPT = "Type your prompt for the LLM Model and the HNSW Vector to process";
+declare const SCHEMA_INDEX_PATH = "Define Vector Index path you wanna use, can be retrieved from genkitx-hnsw-indexer plugin";
+declare const SCHEMA_RESULT = "The prompt result with more context from HNSW Vector";
+declare const SCHEMA_TRAINABLE_PATH = "Your data and other documents path to be learned by the AI";
+declare const SCHEMA_INDEX_OUTPUT_PATH = "Your expected output path for your Vector Store Index that is processed based on the data and documents you provided";
+declare const EMBEDDING_MODEL_NAME = "Gemini Model embedding-001";
+declare const EMBEDDING_MODEL = "embedding-001";
+declare const EMBEDDING_TITLE = "Gemini embedding-001";
+
+export { EMBEDDING_MODEL, EMBEDDING_MODEL_NAME, EMBEDDING_TITLE, ERROR_INVALID_ARGUMENT, ERROR_NO_API_KEY, FLOW_NAME_INDEXER, FLOW_NAME_RETRIEVER, PLUGIN_NAME_INDEXER, PLUGIN_NAME_RETRIEVER, SCHEMA_INDEX_OUTPUT_PATH, SCHEMA_INDEX_PATH, SCHEMA_PROMPT, SCHEMA_RESULT, SCHEMA_TRAINABLE_PATH };
diff --git a/plugins/hnsw/lib/constants/index.d.ts b/plugins/hnsw/lib/constants/index.d.ts
new file mode 100644
index 00000000..c291f128
--- /dev/null
+++ b/plugins/hnsw/lib/constants/index.d.ts
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2024 The Fire Company
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+declare const PLUGIN_NAME_INDEXER = "HNSW Indexer";
+declare const PLUGIN_NAME_RETRIEVER = "HNSW Retriever";
+declare const FLOW_NAME_INDEXER = "HNSW Indexer";
+declare const FLOW_NAME_RETRIEVER = "HNSW Retriever";
+declare const ERROR_NO_API_KEY = "Must supply either `options.apiKey` or set `GOOGLE_API_KEY` environment variable.";
+declare const ERROR_INVALID_ARGUMENT = "INVALID_ARGUMENT";
+declare const SCHEMA_PROMPT = "Type your prompt for the LLM Model and the HNSW Vector to process";
+declare const SCHEMA_INDEX_PATH = "Define Vector Index path you wanna use, can be retrieved from genkitx-hnsw-indexer plugin";
+declare const SCHEMA_RESULT = "The prompt result with more context from HNSW Vector";
+declare const SCHEMA_TRAINABLE_PATH = "Your data and other documents path to be learned by the AI";
+declare const SCHEMA_INDEX_OUTPUT_PATH = "Your expected output path for your Vector Store Index that is processed based on the data and documents you provided";
+declare const EMBEDDING_MODEL_NAME = "Gemini Model embedding-001";
+declare const EMBEDDING_MODEL = "embedding-001";
+declare const EMBEDDING_TITLE = "Gemini embedding-001";
+
+export { EMBEDDING_MODEL, EMBEDDING_MODEL_NAME, EMBEDDING_TITLE, ERROR_INVALID_ARGUMENT, ERROR_NO_API_KEY, FLOW_NAME_INDEXER, FLOW_NAME_RETRIEVER, PLUGIN_NAME_INDEXER, PLUGIN_NAME_RETRIEVER, SCHEMA_INDEX_OUTPUT_PATH, SCHEMA_INDEX_PATH, SCHEMA_PROMPT, SCHEMA_RESULT, SCHEMA_TRAINABLE_PATH };
diff --git a/plugins/hnsw/lib/constants/index.js b/plugins/hnsw/lib/constants/index.js
new file mode 100644
index 00000000..a1d5dea8
--- /dev/null
+++ b/plugins/hnsw/lib/constants/index.js
@@ -0,0 +1,68 @@
+"use strict";
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+var constants_exports = {};
+__export(constants_exports, {
+  EMBEDDING_MODEL: () => EMBEDDING_MODEL,
+  EMBEDDING_MODEL_NAME: () => EMBEDDING_MODEL_NAME,
+  EMBEDDING_TITLE: () => EMBEDDING_TITLE,
+  ERROR_INVALID_ARGUMENT: () => ERROR_INVALID_ARGUMENT,
+  ERROR_NO_API_KEY: () => ERROR_NO_API_KEY,
+  FLOW_NAME_INDEXER: () => FLOW_NAME_INDEXER,
+  FLOW_NAME_RETRIEVER: () => FLOW_NAME_RETRIEVER,
+  PLUGIN_NAME_INDEXER: () => PLUGIN_NAME_INDEXER,
+  PLUGIN_NAME_RETRIEVER: () => PLUGIN_NAME_RETRIEVER,
+  SCHEMA_INDEX_OUTPUT_PATH: () => SCHEMA_INDEX_OUTPUT_PATH,
+  SCHEMA_INDEX_PATH: () => SCHEMA_INDEX_PATH,
+  SCHEMA_PROMPT: () => SCHEMA_PROMPT,
+  SCHEMA_RESULT: () => SCHEMA_RESULT,
+  SCHEMA_TRAINABLE_PATH: () => SCHEMA_TRAINABLE_PATH
+});
+module.exports = __toCommonJS(constants_exports);
+const PLUGIN_NAME_INDEXER = "HNSW Indexer";
+const PLUGIN_NAME_RETRIEVER = "HNSW Retriever";
+const FLOW_NAME_INDEXER = "HNSW Indexer";
+const FLOW_NAME_RETRIEVER = "HNSW Retriever";
+const ERROR_NO_API_KEY = "Must supply either `options.apiKey` or set `GOOGLE_API_KEY` environment variable.";
+const ERROR_INVALID_ARGUMENT = "INVALID_ARGUMENT";
ERROR_INVALID_ARGUMENT = "INVALID_ARGUMENT"; +const SCHEMA_PROMPT = "Type your prompt for the LLM Model and the HNSW Vector to process"; +const SCHEMA_INDEX_PATH = "Define Vector Index path you wanna use, can be retrieved from genkitx-hnsw-indexer plugin"; +const SCHEMA_RESULT = "The prompt result with more context from HNSW Vector"; +const SCHEMA_TRAINABLE_PATH = "Your data and other documents path to be learned by the AI"; +const SCHEMA_INDEX_OUTPUT_PATH = "Your expected output path for your Vector Store Index that is processed based on the data and documents you provided"; +const EMBEDDING_MODEL_NAME = "Gemini Model embedding-001"; +const EMBEDDING_MODEL = "embedding-001"; +const EMBEDDING_TITLE = "Gemini embedding-001"; +// Annotate the CommonJS export names for ESM import in node: +0 && (module.exports = { + EMBEDDING_MODEL, + EMBEDDING_MODEL_NAME, + EMBEDDING_TITLE, + ERROR_INVALID_ARGUMENT, + ERROR_NO_API_KEY, + FLOW_NAME_INDEXER, + FLOW_NAME_RETRIEVER, + PLUGIN_NAME_INDEXER, + PLUGIN_NAME_RETRIEVER, + SCHEMA_INDEX_OUTPUT_PATH, + SCHEMA_INDEX_PATH, + SCHEMA_PROMPT, + SCHEMA_RESULT, + SCHEMA_TRAINABLE_PATH +}); +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/plugins/hnsw/lib/constants/index.js.map b/plugins/hnsw/lib/constants/index.js.map new file mode 100644 index 00000000..79b8d9d2 --- /dev/null +++ b/plugins/hnsw/lib/constants/index.js.map @@ -0,0 +1 @@ +{"version":3,"sources":["../../src/constants/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nexport const PLUGIN_NAME_INDEXER = 'HNSW Indexer';\nexport const PLUGIN_NAME_RETRIEVER = 'HNSW Retriever';\nexport const FLOW_NAME_INDEXER = 'HNSW Indexer';\nexport const FLOW_NAME_RETRIEVER = 'HNSW Retriever';\nexport const ERROR_NO_API_KEY =\n 'Must supply either `options.apiKey` or set `GOOGLE_API_KEY` environment variable.';\nexport const ERROR_INVALID_ARGUMENT = 'INVALID_ARGUMENT';\nexport const SCHEMA_PROMPT =\n 'Type your prompt for the LLM Model and the HNSW Vector to process';\nexport const SCHEMA_INDEX_PATH =\n 'Define Vector Index path you wanna use, can be retrieved from genkitx-hnsw-indexer plugin';\nexport const SCHEMA_RESULT =\n 'The prompt result with more context from HNSW Vector';\nexport const SCHEMA_TRAINABLE_PATH =\n 'Your data and other documents path to be learned by the AI';\nexport const SCHEMA_INDEX_OUTPUT_PATH =\n 'Your expected output path for your Vector Store Index that is processed based on the data and documents you provided';\nexport const EMBEDDING_MODEL_NAME = 'Gemini Model embedding-001';\nexport const EMBEDDING_MODEL = 'embedding-001';\nexport const EMBEDDING_TITLE = 'Gemini 
embedding-001';\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAgBO,MAAM,sBAAsB;AAC5B,MAAM,wBAAwB;AAC9B,MAAM,oBAAoB;AAC1B,MAAM,sBAAsB;AAC5B,MAAM,mBACX;AACK,MAAM,yBAAyB;AAC/B,MAAM,gBACX;AACK,MAAM,oBACX;AACK,MAAM,gBACX;AACK,MAAM,wBACX;AACK,MAAM,2BACX;AACK,MAAM,uBAAuB;AAC7B,MAAM,kBAAkB;AACxB,MAAM,kBAAkB;","names":[]} \ No newline at end of file diff --git a/plugins/hnsw/lib/constants/index.mjs b/plugins/hnsw/lib/constants/index.mjs new file mode 100644 index 00000000..b4670480 --- /dev/null +++ b/plugins/hnsw/lib/constants/index.mjs @@ -0,0 +1,31 @@ +const PLUGIN_NAME_INDEXER = "HNSW Indexer"; +const PLUGIN_NAME_RETRIEVER = "HNSW Retriever"; +const FLOW_NAME_INDEXER = "HNSW Indexer"; +const FLOW_NAME_RETRIEVER = "HNSW Retriever"; +const ERROR_NO_API_KEY = "Must supply either `options.apiKey` or set `GOOGLE_API_KEY` environment variable."; +const ERROR_INVALID_ARGUMENT = "INVALID_ARGUMENT"; +const SCHEMA_PROMPT = "Type your prompt for the LLM Model and the HNSW Vector to process"; +const SCHEMA_INDEX_PATH = "Define Vector Index path you wanna use, can be retrieved from genkitx-hnsw-indexer plugin"; +const SCHEMA_RESULT = "The prompt result with more context from HNSW Vector"; +const SCHEMA_TRAINABLE_PATH = "Your data and other documents path to be learned by the AI"; +const SCHEMA_INDEX_OUTPUT_PATH = "Your expected output path for your Vector Store Index that is processed based on the data and documents you provided"; +const EMBEDDING_MODEL_NAME = "Gemini Model embedding-001"; +const EMBEDDING_MODEL = "embedding-001"; +const EMBEDDING_TITLE = "Gemini embedding-001"; +export { + EMBEDDING_MODEL, + EMBEDDING_MODEL_NAME, + EMBEDDING_TITLE, + ERROR_INVALID_ARGUMENT, + ERROR_NO_API_KEY, + FLOW_NAME_INDEXER, + FLOW_NAME_RETRIEVER, + PLUGIN_NAME_INDEXER, + PLUGIN_NAME_RETRIEVER, + SCHEMA_INDEX_OUTPUT_PATH, + SCHEMA_INDEX_PATH, + SCHEMA_PROMPT, + SCHEMA_RESULT, + SCHEMA_TRAINABLE_PATH +}; +//# sourceMappingURL=index.mjs.map \ No newline at end of file diff --git a/plugins/hnsw/lib/constants/index.mjs.map b/plugins/hnsw/lib/constants/index.mjs.map new file mode 100644 index 00000000..59527eab --- /dev/null +++ b/plugins/hnsw/lib/constants/index.mjs.map @@ -0,0 +1 @@ +{"version":3,"sources":["../../src/constants/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nexport const PLUGIN_NAME_INDEXER = 'HNSW Indexer';\nexport const PLUGIN_NAME_RETRIEVER = 'HNSW Retriever';\nexport const FLOW_NAME_INDEXER = 'HNSW Indexer';\nexport const FLOW_NAME_RETRIEVER = 'HNSW Retriever';\nexport const ERROR_NO_API_KEY =\n 'Must supply either `options.apiKey` or set `GOOGLE_API_KEY` environment variable.';\nexport const ERROR_INVALID_ARGUMENT = 'INVALID_ARGUMENT';\nexport const SCHEMA_PROMPT =\n 'Type your prompt for the LLM Model and the HNSW Vector to process';\nexport const SCHEMA_INDEX_PATH =\n 'Define Vector Index path you wanna use, can be retrieved from 
genkitx-hnsw-indexer plugin';\nexport const SCHEMA_RESULT =\n 'The prompt result with more context from HNSW Vector';\nexport const SCHEMA_TRAINABLE_PATH =\n 'Your data and other documents path to be learned by the AI';\nexport const SCHEMA_INDEX_OUTPUT_PATH =\n 'Your expected output path for your Vector Store Index that is processed based on the data and documents you provided';\nexport const EMBEDDING_MODEL_NAME = 'Gemini Model embedding-001';\nexport const EMBEDDING_MODEL = 'embedding-001';\nexport const EMBEDDING_TITLE = 'Gemini embedding-001';\n"],"mappings":"AAgBO,MAAM,sBAAsB;AAC5B,MAAM,wBAAwB;AAC9B,MAAM,oBAAoB;AAC1B,MAAM,sBAAsB;AAC5B,MAAM,mBACX;AACK,MAAM,yBAAyB;AAC/B,MAAM,gBACX;AACK,MAAM,oBACX;AACK,MAAM,gBACX;AACK,MAAM,wBACX;AACK,MAAM,2BACX;AACK,MAAM,uBAAuB;AAC7B,MAAM,kBAAkB;AACxB,MAAM,kBAAkB;","names":[]} \ No newline at end of file diff --git a/plugins/hnsw/lib/index.d.mts b/plugins/hnsw/lib/index.d.mts new file mode 100644 index 00000000..4ac5a3c3 --- /dev/null +++ b/plugins/hnsw/lib/index.d.mts @@ -0,0 +1,7 @@ +import * as _genkit_ai_core from '@genkit-ai/core'; +import { PluginOptions } from './interfaces/index.mjs'; + +declare const hnswIndexer: _genkit_ai_core.Plugin<[pluginOptions: PluginOptions]>; +declare const hnswRetriever: _genkit_ai_core.Plugin<[pluginOptions: PluginOptions]>; + +export { hnswIndexer, hnswRetriever }; diff --git a/plugins/hnsw/lib/index.d.ts b/plugins/hnsw/lib/index.d.ts new file mode 100644 index 00000000..1abed650 --- /dev/null +++ b/plugins/hnsw/lib/index.d.ts @@ -0,0 +1,7 @@ +import * as _genkit_ai_core from '@genkit-ai/core'; +import { PluginOptions } from './interfaces/index.js'; + +declare const hnswIndexer: _genkit_ai_core.Plugin<[pluginOptions: PluginOptions]>; +declare const hnswRetriever: _genkit_ai_core.Plugin<[pluginOptions: PluginOptions]>; + +export { hnswIndexer, hnswRetriever }; diff --git a/plugins/hnsw/lib/index.js b/plugins/hnsw/lib/index.js new file mode 100644 index 00000000..7e1811b3 --- /dev/null +++ b/plugins/hnsw/lib/index.js @@ -0,0 +1,56 @@ +"use strict"; +var __defProp = Object.defineProperty; +var __getOwnPropDesc = Object.getOwnPropertyDescriptor; +var __getOwnPropNames = Object.getOwnPropertyNames; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); +}; +var __copyProps = (to, from, except, desc) => { + if (from && typeof from === "object" || typeof from === "function") { + for (let key of __getOwnPropNames(from)) + if (!__hasOwnProp.call(to, key) && key !== except) + __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); + } + return to; +}; +var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); +var src_exports = {}; +__export(src_exports, { + hnswIndexer: () => hnswIndexer, + hnswRetriever: () => hnswRetriever +}); +module.exports = __toCommonJS(src_exports); +var import_core = require("@genkit-ai/core"); +var import_flow = require("@genkit-ai/flow"); +var import_actions = require("./actions"); +var import_config = require("./config"); +var import_utilities = require("./utilities"); +var import_constants = require("./constants"); +const hnswIndexer = (0, import_core.genkitPlugin)( + import_constants.PLUGIN_NAME_INDEXER, + async (pluginOptions) => { + (0, import_utilities.checkApiKey)(pluginOptions); + (0, import_flow.defineFlow)( + import_config.indexerFlowConfig, + (flowOptions) => (0, 
import_actions.hnswIndexerAction)(flowOptions, pluginOptions) + ); + } +); +const hnswRetriever = (0, import_core.genkitPlugin)( + import_constants.PLUGIN_NAME_RETRIEVER, + async (pluginOptions) => { + (0, import_utilities.checkApiKey)(pluginOptions); + (0, import_flow.defineFlow)( + import_config.retrieverflowConfig, + (flowOptions) => (0, import_actions.hnswRetrieverAction)(flowOptions, pluginOptions) + ); + } +); +// Annotate the CommonJS export names for ESM import in node: +0 && (module.exports = { + hnswIndexer, + hnswRetriever +}); +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/plugins/hnsw/lib/index.js.map b/plugins/hnsw/lib/index.js.map new file mode 100644 index 00000000..76d79364 --- /dev/null +++ b/plugins/hnsw/lib/index.js.map @@ -0,0 +1 @@ +{"version":3,"sources":["../src/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { genkitPlugin } from '@genkit-ai/core';\nimport { defineFlow } from '@genkit-ai/flow';\n\nimport { PluginOptions } from './interfaces';\nimport { hnswIndexerAction, hnswRetrieverAction } from './actions';\nimport { indexerFlowConfig, retrieverflowConfig } from './config';\nimport { checkApiKey } from './utilities';\nimport { PLUGIN_NAME_INDEXER, PLUGIN_NAME_RETRIEVER } from './constants';\n\nexport const hnswIndexer = genkitPlugin(\n PLUGIN_NAME_INDEXER,\n async (pluginOptions: PluginOptions) => {\n checkApiKey(pluginOptions);\n defineFlow(indexerFlowConfig, (flowOptions) =>\n hnswIndexerAction(flowOptions, pluginOptions)\n );\n }\n);\n\nexport const hnswRetriever = genkitPlugin(\n PLUGIN_NAME_RETRIEVER,\n async (pluginOptions: PluginOptions) => {\n checkApiKey(pluginOptions);\n defineFlow(retrieverflowConfig, (flowOptions) =>\n hnswRetrieverAction(flowOptions, pluginOptions)\n );\n }\n);\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAgBA,kBAA6B;AAC7B,kBAA2B;AAG3B,qBAAuD;AACvD,oBAAuD;AACvD,uBAA4B;AAC5B,uBAA2D;AAEpD,MAAM,kBAAc;AAAA,EACzB;AAAA,EACA,OAAO,kBAAiC;AACtC,sCAAY,aAAa;AACzB;AAAA,MAAW;AAAA,MAAmB,CAAC,oBAC7B,kCAAkB,aAAa,aAAa;AAAA,IAC9C;AAAA,EACF;AACF;AAEO,MAAM,oBAAgB;AAAA,EAC3B;AAAA,EACA,OAAO,kBAAiC;AACtC,sCAAY,aAAa;AACzB;AAAA,MAAW;AAAA,MAAqB,CAAC,oBAC/B,oCAAoB,aAAa,aAAa;AAAA,IAChD;AAAA,EACF;AACF;","names":[]} \ No newline at end of file diff --git a/plugins/hnsw/lib/index.mjs b/plugins/hnsw/lib/index.mjs new file mode 100644 index 00000000..10e17122 --- /dev/null +++ b/plugins/hnsw/lib/index.mjs @@ -0,0 +1,31 @@ +import { genkitPlugin } from "@genkit-ai/core"; +import { defineFlow } from "@genkit-ai/flow"; +import { hnswIndexerAction, hnswRetrieverAction } from "./actions"; +import { indexerFlowConfig, retrieverflowConfig } from "./config"; +import { checkApiKey } from "./utilities"; +import { PLUGIN_NAME_INDEXER, PLUGIN_NAME_RETRIEVER } from "./constants"; +const hnswIndexer = genkitPlugin( + PLUGIN_NAME_INDEXER, + async (pluginOptions) => { + checkApiKey(pluginOptions); + defineFlow( + indexerFlowConfig, + 
(flowOptions) => hnswIndexerAction(flowOptions, pluginOptions) + ); + } +); +const hnswRetriever = genkitPlugin( + PLUGIN_NAME_RETRIEVER, + async (pluginOptions) => { + checkApiKey(pluginOptions); + defineFlow( + retrieverflowConfig, + (flowOptions) => hnswRetrieverAction(flowOptions, pluginOptions) + ); + } +); +export { + hnswIndexer, + hnswRetriever +}; +//# sourceMappingURL=index.mjs.map \ No newline at end of file diff --git a/plugins/hnsw/lib/index.mjs.map b/plugins/hnsw/lib/index.mjs.map new file mode 100644 index 00000000..18a8d668 --- /dev/null +++ b/plugins/hnsw/lib/index.mjs.map @@ -0,0 +1 @@ +{"version":3,"sources":["../src/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { genkitPlugin } from '@genkit-ai/core';\nimport { defineFlow } from '@genkit-ai/flow';\n\nimport { PluginOptions } from './interfaces';\nimport { hnswIndexerAction, hnswRetrieverAction } from './actions';\nimport { indexerFlowConfig, retrieverflowConfig } from './config';\nimport { checkApiKey } from './utilities';\nimport { PLUGIN_NAME_INDEXER, PLUGIN_NAME_RETRIEVER } from './constants';\n\nexport const hnswIndexer = genkitPlugin(\n PLUGIN_NAME_INDEXER,\n async (pluginOptions: PluginOptions) => {\n checkApiKey(pluginOptions);\n defineFlow(indexerFlowConfig, (flowOptions) =>\n hnswIndexerAction(flowOptions, pluginOptions)\n );\n }\n);\n\nexport const hnswRetriever = genkitPlugin(\n PLUGIN_NAME_RETRIEVER,\n async (pluginOptions: PluginOptions) => {\n checkApiKey(pluginOptions);\n defineFlow(retrieverflowConfig, (flowOptions) =>\n hnswRetrieverAction(flowOptions, pluginOptions)\n );\n }\n);\n"],"mappings":"AAgBA,SAAS,oBAAoB;AAC7B,SAAS,kBAAkB;AAG3B,SAAS,mBAAmB,2BAA2B;AACvD,SAAS,mBAAmB,2BAA2B;AACvD,SAAS,mBAAmB;AAC5B,SAAS,qBAAqB,6BAA6B;AAEpD,MAAM,cAAc;AAAA,EACzB;AAAA,EACA,OAAO,kBAAiC;AACtC,gBAAY,aAAa;AACzB;AAAA,MAAW;AAAA,MAAmB,CAAC,gBAC7B,kBAAkB,aAAa,aAAa;AAAA,IAC9C;AAAA,EACF;AACF;AAEO,MAAM,gBAAgB;AAAA,EAC3B;AAAA,EACA,OAAO,kBAAiC;AACtC,gBAAY,aAAa;AACzB;AAAA,MAAW;AAAA,MAAqB,CAAC,gBAC/B,oBAAoB,aAAa,aAAa;AAAA,IAChD;AAAA,EACF;AACF;","names":[]} \ No newline at end of file diff --git a/plugins/hnsw/lib/indexer/index.d.mts b/plugins/hnsw/lib/indexer/index.d.mts new file mode 100644 index 00000000..dd15fa62 --- /dev/null +++ b/plugins/hnsw/lib/indexer/index.d.mts @@ -0,0 +1,21 @@ +import { IndexerFlowOptions, PluginOptions } from '../interfaces/index.mjs'; + +/** + * Copyright 2024 The Fire Company + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +declare const saveVectorIndexer: (flowOptions: IndexerFlowOptions, pluginOptions: PluginOptions) => Promise; + +export { saveVectorIndexer }; diff --git a/plugins/hnsw/lib/indexer/index.d.ts b/plugins/hnsw/lib/indexer/index.d.ts new file mode 100644 index 00000000..36517f24 --- /dev/null +++ b/plugins/hnsw/lib/indexer/index.d.ts @@ -0,0 +1,21 @@ +import { IndexerFlowOptions, PluginOptions } from '../interfaces/index.js'; + +/** + * Copyright 2024 The Fire Company + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +declare const saveVectorIndexer: (flowOptions: IndexerFlowOptions, pluginOptions: PluginOptions) => Promise; + +export { saveVectorIndexer }; diff --git a/plugins/hnsw/lib/indexer/index.js b/plugins/hnsw/lib/indexer/index.js new file mode 100644 index 00000000..366fbb55 --- /dev/null +++ b/plugins/hnsw/lib/indexer/index.js @@ -0,0 +1,103 @@ +"use strict"; +var __create = Object.create; +var __defProp = Object.defineProperty; +var __getOwnPropDesc = Object.getOwnPropertyDescriptor; +var __getOwnPropNames = Object.getOwnPropertyNames; +var __getProtoOf = Object.getPrototypeOf; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); +}; +var __copyProps = (to, from, except, desc) => { + if (from && typeof from === "object" || typeof from === "function") { + for (let key of __getOwnPropNames(from)) + if (!__hasOwnProp.call(to, key) && key !== except) + __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); + } + return to; +}; +var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps( + // If the importer is in node compatibility mode or this is not an ESM + // file that has been converted to a CommonJS file using a Babel- + // compatible transform (i.e. "__esModule" has not been set), then set + // "default" to the CommonJS "module.exports" for node compatibility. + isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target, + mod +)); +var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); +var indexer_exports = {}; +__export(indexer_exports, { + saveVectorIndexer: () => saveVectorIndexer +}); +module.exports = __toCommonJS(indexer_exports); +var import_google_genai = require("@langchain/google-genai"); +var import_glob = require("glob"); +var import_fs = __toESM(require("fs")); +var import_text_splitter = require("langchain/text_splitter"); +var import_vectorstores = require("langchain/vectorstores"); +var import_generative_ai = require("@google/generative-ai"); +var import_constants = require("../constants"); +const getFilesData = (files) => { + console.log( + `Added ${files.length} files to data. 
Splitting text into chunks...` + ); + const filesData = []; + for (const file of files) { + filesData.push(import_fs.default.readFileSync(file, "utf-8")); + } + return filesData; +}; +const getFiles = async (input) => { + try { + return (0, import_glob.glob)(input, { ignore: "node_modules/**" }); + } catch (error) { + console.error("Error fetching files:", error); + throw error; + } +}; +const getSplitter = (chunkSize, separator) => { + return new import_text_splitter.CharacterTextSplitter({ + chunkSize: chunkSize || 12720, + separator: separator || "\n" + }); +}; +const saveVectorStore = async (docs, apiKey, output) => { + console.log("Initializing Store..."); + const store = await import_vectorstores.HNSWLib.fromTexts( + docs, + docs.map((_, i) => ({ id: i })), + new import_google_genai.GoogleGenerativeAIEmbeddings({ + apiKey: apiKey || process.env.GOOGLE_API_KEY, + model: import_constants.EMBEDDING_MODEL, + modelName: import_constants.EMBEDDING_MODEL_NAME, + taskType: import_generative_ai.TaskType.RETRIEVAL_DOCUMENT, + title: import_constants.EMBEDDING_TITLE + }) + ); + console.log("Saving Vectorstore"); + await store.save(output); + return `VectorStore saved to ${output}`; +}; +const getVectorDocument = (filesData, textSplitter) => { + let docs = []; + for (const d of filesData) { + const docOutput = textSplitter.splitText(d); + docs = [...docs, ...docOutput]; + } + return docs.splice(docs.length - 4, 4); +}; +const saveVectorIndexer = async (flowOptions, pluginOptions) => { + const { dataPath, indexOutputPath, chunkSize, separator } = flowOptions; + const { apiKey } = pluginOptions; + const files = await getFiles(dataPath); + const filesData = getFilesData(files); + const textSplitter = getSplitter(chunkSize, separator); + const vectorDocument = getVectorDocument(filesData, textSplitter); + return saveVectorStore(vectorDocument, apiKey, indexOutputPath); +}; +// Annotate the CommonJS export names for ESM import in node: +0 && (module.exports = { + saveVectorIndexer +}); +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/plugins/hnsw/lib/indexer/index.js.map b/plugins/hnsw/lib/indexer/index.js.map new file mode 100644 index 00000000..af93d576 --- /dev/null +++ b/plugins/hnsw/lib/indexer/index.js.map @@ -0,0 +1 @@ +{"version":3,"sources":["../../src/indexer/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { GoogleGenerativeAIEmbeddings } from '@langchain/google-genai';\nimport { glob } from 'glob';\nimport fs from 'fs';\nimport { CharacterTextSplitter } from 'langchain/text_splitter';\nimport { HNSWLib } from 'langchain/vectorstores';\nimport { TaskType } from '@google/generative-ai';\n\nimport { IndexerFlowOptions, PluginOptions } from '../interfaces';\nimport {\n EMBEDDING_MODEL_NAME,\n EMBEDDING_MODEL,\n EMBEDDING_TITLE,\n} from '../constants';\n\nconst getFilesData = (files: string[]): string[] => {\n console.log(\n `Added ${files.length} files to data. 
Splitting text into chunks...`\n );\n const filesData: string[] = [];\n for (const file of files) {\n filesData.push(fs.readFileSync(file, 'utf-8'));\n }\n return filesData;\n};\n\nconst getFiles = async (input: string): Promise => {\n try {\n return glob(input, { ignore: 'node_modules/**' });\n } catch (error) {\n console.error('Error fetching files:', error);\n throw error;\n }\n};\n\nconst getSplitter = (\n chunkSize: number | undefined,\n separator: string | undefined\n) => {\n return new CharacterTextSplitter({\n chunkSize: chunkSize || 12720,\n separator: separator || '\\n',\n });\n};\n\nconst saveVectorStore = async (\n docs: string[],\n apiKey: string | undefined,\n output: string\n) => {\n console.log('Initializing Store...');\n const store = await HNSWLib.fromTexts(\n docs,\n docs.map((_: any, i: any) => ({ id: i })),\n new GoogleGenerativeAIEmbeddings({\n apiKey: apiKey || process.env.GOOGLE_API_KEY,\n model: EMBEDDING_MODEL,\n modelName: EMBEDDING_MODEL_NAME,\n taskType: TaskType.RETRIEVAL_DOCUMENT,\n title: EMBEDDING_TITLE,\n })\n );\n console.log('Saving Vectorstore');\n await store.save(output);\n return `VectorStore saved to ${output}`;\n};\n\nconst getVectorDocument = (\n filesData: string[],\n textSplitter: { splitText: (arg0: any) => any }\n) => {\n let docs: string[] = [];\n for (const d of filesData) {\n const docOutput = textSplitter.splitText(d);\n docs = [...docs, ...docOutput];\n }\n return docs.splice(docs.length - 4, 4);\n};\n\nconst saveVectorIndexer = async (\n flowOptions: IndexerFlowOptions,\n pluginOptions: PluginOptions\n) => {\n const { dataPath, indexOutputPath, chunkSize, separator } = flowOptions;\n const { apiKey } = pluginOptions;\n\n const files: string[] = await getFiles(dataPath);\n const filesData = getFilesData(files);\n const textSplitter = getSplitter(chunkSize, separator);\n const vectorDocument = getVectorDocument(filesData, textSplitter);\n\n return saveVectorStore(vectorDocument, apiKey, indexOutputPath);\n};\n\nexport { saveVectorIndexer };\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAgBA,0BAA6C;AAC7C,kBAAqB;AACrB,gBAAe;AACf,2BAAsC;AACtC,0BAAwB;AACxB,2BAAyB;AAGzB,uBAIO;AAEP,MAAM,eAAe,CAAC,UAA8B;AAClD,UAAQ;AAAA,IACN,SAAS,MAAM,MAAM;AAAA,EACvB;AACA,QAAM,YAAsB,CAAC;AAC7B,aAAW,QAAQ,OAAO;AACxB,cAAU,KAAK,UAAAA,QAAG,aAAa,MAAM,OAAO,CAAC;AAAA,EAC/C;AACA,SAAO;AACT;AAEA,MAAM,WAAW,OAAO,UAAqC;AAC3D,MAAI;AACF,eAAO,kBAAK,OAAO,EAAE,QAAQ,kBAAkB,CAAC;AAAA,EAClD,SAAS,OAAO;AACd,YAAQ,MAAM,yBAAyB,KAAK;AAC5C,UAAM;AAAA,EACR;AACF;AAEA,MAAM,cAAc,CAClB,WACA,cACG;AACH,SAAO,IAAI,2CAAsB;AAAA,IAC/B,WAAW,aAAa;AAAA,IACxB,WAAW,aAAa;AAAA,EAC1B,CAAC;AACH;AAEA,MAAM,kBAAkB,OACtB,MACA,QACA,WACG;AACH,UAAQ,IAAI,uBAAuB;AACnC,QAAM,QAAQ,MAAM,4BAAQ;AAAA,IAC1B;AAAA,IACA,KAAK,IAAI,CAAC,GAAQ,OAAY,EAAE,IAAI,EAAE,EAAE;AAAA,IACxC,IAAI,iDAA6B;AAAA,MAC/B,QAAQ,UAAU,QAAQ,IAAI;AAAA,MAC9B,OAAO;AAAA,MACP,WAAW;AAAA,MACX,UAAU,8BAAS;AAAA,MACnB,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AACA,UAAQ,IAAI,oBAAoB;AAChC,QAAM,MAAM,KAAK,MAAM;AACvB,SAAO,wBAAwB,MAAM;AACvC;AAEA,MAAM,oBAAoB,CACxB,WACA,iBACG;AACH,MAAI,OAAiB,CAAC;AACtB,aAAW,KAAK,WAAW;AACzB,UAAM,YAAY,aAAa,UAAU,CAAC;AAC1C,WAAO,CAAC,GAAG,MAAM,GAAG,SAAS;AAAA,EAC/B;AACA,SAAO,KAAK,OAAO,KAAK,SAAS,GAAG,CAAC;AACvC;AAEA,MAAM,oBAAoB,OACxB,aACA,kBACG;AACH,QAAM,EAAE,UAAU,iBAAiB,WAAW,UAAU,IAAI;AAC5D,QAAM,EAAE,OAAO,IAAI;AAEnB,QAAM,QAAkB,MAAM,SAAS,QAAQ;AAC/C,QAAM,YAAY,aAAa,KAAK;AACpC,QAAM,eAAe,YAAY,WAAW,SAAS;AACrD,QAAM,iBAAiB,kBAAkB,WAAW,YAAY;AAEhE,SAAO,gBAAgB,gBAAgB,QAAQ,eAAe;AAChE;","names":["fs"]} \ No newline at end of file diff --git 
a/plugins/hnsw/lib/indexer/index.mjs b/plugins/hnsw/lib/indexer/index.mjs new file mode 100644 index 00000000..5834140f --- /dev/null +++ b/plugins/hnsw/lib/indexer/index.mjs @@ -0,0 +1,73 @@ +import { GoogleGenerativeAIEmbeddings } from "@langchain/google-genai"; +import { glob } from "glob"; +import fs from "fs"; +import { CharacterTextSplitter } from "langchain/text_splitter"; +import { HNSWLib } from "langchain/vectorstores"; +import { TaskType } from "@google/generative-ai"; +import { + EMBEDDING_MODEL_NAME, + EMBEDDING_MODEL, + EMBEDDING_TITLE +} from "../constants"; +const getFilesData = (files) => { + console.log( + `Added ${files.length} files to data. Splitting text into chunks...` + ); + const filesData = []; + for (const file of files) { + filesData.push(fs.readFileSync(file, "utf-8")); + } + return filesData; +}; +const getFiles = async (input) => { + try { + return glob(input, { ignore: "node_modules/**" }); + } catch (error) { + console.error("Error fetching files:", error); + throw error; + } +}; +const getSplitter = (chunkSize, separator) => { + return new CharacterTextSplitter({ + chunkSize: chunkSize || 12720, + separator: separator || "\n" + }); +}; +const saveVectorStore = async (docs, apiKey, output) => { + console.log("Initializing Store..."); + const store = await HNSWLib.fromTexts( + docs, + docs.map((_, i) => ({ id: i })), + new GoogleGenerativeAIEmbeddings({ + apiKey: apiKey || process.env.GOOGLE_API_KEY, + model: EMBEDDING_MODEL, + modelName: EMBEDDING_MODEL_NAME, + taskType: TaskType.RETRIEVAL_DOCUMENT, + title: EMBEDDING_TITLE + }) + ); + console.log("Saving Vectorstore"); + await store.save(output); + return `VectorStore saved to ${output}`; +}; +const getVectorDocument = (filesData, textSplitter) => { + let docs = []; + for (const d of filesData) { + const docOutput = textSplitter.splitText(d); + docs = [...docs, ...docOutput]; + } + return docs.splice(docs.length - 4, 4); +}; +const saveVectorIndexer = async (flowOptions, pluginOptions) => { + const { dataPath, indexOutputPath, chunkSize, separator } = flowOptions; + const { apiKey } = pluginOptions; + const files = await getFiles(dataPath); + const filesData = getFilesData(files); + const textSplitter = getSplitter(chunkSize, separator); + const vectorDocument = getVectorDocument(filesData, textSplitter); + return saveVectorStore(vectorDocument, apiKey, indexOutputPath); +}; +export { + saveVectorIndexer +}; +//# sourceMappingURL=index.mjs.map \ No newline at end of file diff --git a/plugins/hnsw/lib/indexer/index.mjs.map b/plugins/hnsw/lib/indexer/index.mjs.map new file mode 100644 index 00000000..e4ace65c --- /dev/null +++ b/plugins/hnsw/lib/indexer/index.mjs.map @@ -0,0 +1 @@ +{"version":3,"sources":["../../src/indexer/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { GoogleGenerativeAIEmbeddings } from '@langchain/google-genai';\nimport { glob } from 'glob';\nimport fs from 'fs';\nimport { 
CharacterTextSplitter } from 'langchain/text_splitter';\nimport { HNSWLib } from 'langchain/vectorstores';\nimport { TaskType } from '@google/generative-ai';\n\nimport { IndexerFlowOptions, PluginOptions } from '../interfaces';\nimport {\n EMBEDDING_MODEL_NAME,\n EMBEDDING_MODEL,\n EMBEDDING_TITLE,\n} from '../constants';\n\nconst getFilesData = (files: string[]): string[] => {\n console.log(\n `Added ${files.length} files to data. Splitting text into chunks...`\n );\n const filesData: string[] = [];\n for (const file of files) {\n filesData.push(fs.readFileSync(file, 'utf-8'));\n }\n return filesData;\n};\n\nconst getFiles = async (input: string): Promise => {\n try {\n return glob(input, { ignore: 'node_modules/**' });\n } catch (error) {\n console.error('Error fetching files:', error);\n throw error;\n }\n};\n\nconst getSplitter = (\n chunkSize: number | undefined,\n separator: string | undefined\n) => {\n return new CharacterTextSplitter({\n chunkSize: chunkSize || 12720,\n separator: separator || '\\n',\n });\n};\n\nconst saveVectorStore = async (\n docs: string[],\n apiKey: string | undefined,\n output: string\n) => {\n console.log('Initializing Store...');\n const store = await HNSWLib.fromTexts(\n docs,\n docs.map((_: any, i: any) => ({ id: i })),\n new GoogleGenerativeAIEmbeddings({\n apiKey: apiKey || process.env.GOOGLE_API_KEY,\n model: EMBEDDING_MODEL,\n modelName: EMBEDDING_MODEL_NAME,\n taskType: TaskType.RETRIEVAL_DOCUMENT,\n title: EMBEDDING_TITLE,\n })\n );\n console.log('Saving Vectorstore');\n await store.save(output);\n return `VectorStore saved to ${output}`;\n};\n\nconst getVectorDocument = (\n filesData: string[],\n textSplitter: { splitText: (arg0: any) => any }\n) => {\n let docs: string[] = [];\n for (const d of filesData) {\n const docOutput = textSplitter.splitText(d);\n docs = [...docs, ...docOutput];\n }\n return docs.splice(docs.length - 4, 4);\n};\n\nconst saveVectorIndexer = async (\n flowOptions: IndexerFlowOptions,\n pluginOptions: PluginOptions\n) => {\n const { dataPath, indexOutputPath, chunkSize, separator } = flowOptions;\n const { apiKey } = pluginOptions;\n\n const files: string[] = await getFiles(dataPath);\n const filesData = getFilesData(files);\n const textSplitter = getSplitter(chunkSize, separator);\n const vectorDocument = getVectorDocument(filesData, textSplitter);\n\n return saveVectorStore(vectorDocument, apiKey, indexOutputPath);\n};\n\nexport { saveVectorIndexer 
};\n"],"mappings":"AAgBA,SAAS,oCAAoC;AAC7C,SAAS,YAAY;AACrB,OAAO,QAAQ;AACf,SAAS,6BAA6B;AACtC,SAAS,eAAe;AACxB,SAAS,gBAAgB;AAGzB;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,OACK;AAEP,MAAM,eAAe,CAAC,UAA8B;AAClD,UAAQ;AAAA,IACN,SAAS,MAAM,MAAM;AAAA,EACvB;AACA,QAAM,YAAsB,CAAC;AAC7B,aAAW,QAAQ,OAAO;AACxB,cAAU,KAAK,GAAG,aAAa,MAAM,OAAO,CAAC;AAAA,EAC/C;AACA,SAAO;AACT;AAEA,MAAM,WAAW,OAAO,UAAqC;AAC3D,MAAI;AACF,WAAO,KAAK,OAAO,EAAE,QAAQ,kBAAkB,CAAC;AAAA,EAClD,SAAS,OAAO;AACd,YAAQ,MAAM,yBAAyB,KAAK;AAC5C,UAAM;AAAA,EACR;AACF;AAEA,MAAM,cAAc,CAClB,WACA,cACG;AACH,SAAO,IAAI,sBAAsB;AAAA,IAC/B,WAAW,aAAa;AAAA,IACxB,WAAW,aAAa;AAAA,EAC1B,CAAC;AACH;AAEA,MAAM,kBAAkB,OACtB,MACA,QACA,WACG;AACH,UAAQ,IAAI,uBAAuB;AACnC,QAAM,QAAQ,MAAM,QAAQ;AAAA,IAC1B;AAAA,IACA,KAAK,IAAI,CAAC,GAAQ,OAAY,EAAE,IAAI,EAAE,EAAE;AAAA,IACxC,IAAI,6BAA6B;AAAA,MAC/B,QAAQ,UAAU,QAAQ,IAAI;AAAA,MAC9B,OAAO;AAAA,MACP,WAAW;AAAA,MACX,UAAU,SAAS;AAAA,MACnB,OAAO;AAAA,IACT,CAAC;AAAA,EACH;AACA,UAAQ,IAAI,oBAAoB;AAChC,QAAM,MAAM,KAAK,MAAM;AACvB,SAAO,wBAAwB,MAAM;AACvC;AAEA,MAAM,oBAAoB,CACxB,WACA,iBACG;AACH,MAAI,OAAiB,CAAC;AACtB,aAAW,KAAK,WAAW;AACzB,UAAM,YAAY,aAAa,UAAU,CAAC;AAC1C,WAAO,CAAC,GAAG,MAAM,GAAG,SAAS;AAAA,EAC/B;AACA,SAAO,KAAK,OAAO,KAAK,SAAS,GAAG,CAAC;AACvC;AAEA,MAAM,oBAAoB,OACxB,aACA,kBACG;AACH,QAAM,EAAE,UAAU,iBAAiB,WAAW,UAAU,IAAI;AAC5D,QAAM,EAAE,OAAO,IAAI;AAEnB,QAAM,QAAkB,MAAM,SAAS,QAAQ;AAC/C,QAAM,YAAY,aAAa,KAAK;AACpC,QAAM,eAAe,YAAY,WAAW,SAAS;AACrD,QAAM,iBAAiB,kBAAkB,WAAW,YAAY;AAEhE,SAAO,gBAAgB,gBAAgB,QAAQ,eAAe;AAChE;","names":[]} \ No newline at end of file diff --git a/plugins/hnsw/lib/interfaces/index.d.mts b/plugins/hnsw/lib/interfaces/index.d.mts new file mode 100644 index 00000000..83e6ea8d --- /dev/null +++ b/plugins/hnsw/lib/interfaces/index.d.mts @@ -0,0 +1,35 @@ +/** + * Copyright 2024 The Fire Company + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +interface RetrieverFlowOptions { + prompt: string; + indexPath: string; + temperature?: number; + maxOutputTokens?: number; + topK?: number; + topP?: number; + stopSequences?: string[]; +} +interface IndexerFlowOptions { + dataPath: string; + indexOutputPath: string; + chunkSize?: number; + separator?: string; +} +interface PluginOptions { + apiKey?: string; +} + +export type { IndexerFlowOptions, PluginOptions, RetrieverFlowOptions }; diff --git a/plugins/hnsw/lib/interfaces/index.d.ts b/plugins/hnsw/lib/interfaces/index.d.ts new file mode 100644 index 00000000..83e6ea8d --- /dev/null +++ b/plugins/hnsw/lib/interfaces/index.d.ts @@ -0,0 +1,35 @@ +/** + * Copyright 2024 The Fire Company + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +interface RetrieverFlowOptions { + prompt: string; + indexPath: string; + temperature?: number; + maxOutputTokens?: number; + topK?: number; + topP?: number; + stopSequences?: string[]; +} +interface IndexerFlowOptions { + dataPath: string; + indexOutputPath: string; + chunkSize?: number; + separator?: string; +} +interface PluginOptions { + apiKey?: string; +} + +export type { IndexerFlowOptions, PluginOptions, RetrieverFlowOptions }; diff --git a/plugins/hnsw/lib/interfaces/index.js b/plugins/hnsw/lib/interfaces/index.js new file mode 100644 index 00000000..ee4ab391 --- /dev/null +++ b/plugins/hnsw/lib/interfaces/index.js @@ -0,0 +1,17 @@ +"use strict"; +var __defProp = Object.defineProperty; +var __getOwnPropDesc = Object.getOwnPropertyDescriptor; +var __getOwnPropNames = Object.getOwnPropertyNames; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __copyProps = (to, from, except, desc) => { + if (from && typeof from === "object" || typeof from === "function") { + for (let key of __getOwnPropNames(from)) + if (!__hasOwnProp.call(to, key) && key !== except) + __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); + } + return to; +}; +var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); +var interfaces_exports = {}; +module.exports = __toCommonJS(interfaces_exports); +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/plugins/hnsw/lib/interfaces/index.js.map b/plugins/hnsw/lib/interfaces/index.js.map new file mode 100644 index 00000000..b56a0a4d --- /dev/null +++ b/plugins/hnsw/lib/interfaces/index.js.map @@ -0,0 +1 @@ +{"version":3,"sources":["../../src/interfaces/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nexport interface RetrieverFlowOptions {\n prompt: string;\n indexPath: string;\n temperature?: number;\n maxOutputTokens?: number;\n topK?: number;\n topP?: number;\n stopSequences?: string[];\n}\n\nexport interface IndexerFlowOptions {\n dataPath: string;\n indexOutputPath: string;\n chunkSize?: number;\n separator?: string;\n}\n\nexport interface PluginOptions {\n apiKey?: string;\n}\n"],"mappings":";;;;;;;;;;;;;;AAAA;AAAA;","names":[]} \ No newline at end of file diff --git a/plugins/hnsw/lib/interfaces/index.mjs b/plugins/hnsw/lib/interfaces/index.mjs new file mode 100644 index 00000000..4bf9eabc --- /dev/null +++ b/plugins/hnsw/lib/interfaces/index.mjs @@ -0,0 +1 @@ +//# sourceMappingURL=index.mjs.map \ No newline at end of file diff --git a/plugins/hnsw/lib/interfaces/index.mjs.map b/plugins/hnsw/lib/interfaces/index.mjs.map new file mode 100644 index 00000000..84c51b28 --- /dev/null +++ b/plugins/hnsw/lib/interfaces/index.mjs.map @@ -0,0 +1 @@ +{"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]} \ No newline at end of file 
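The interfaces above are the public contract for the two flows: `PluginOptions` carries the Google API key, `IndexerFlowOptions` is the input to the "HNSW Indexer" flow, and `RetrieverFlowOptions` is the input to the "HNSW Retriever" flow. For reference, a minimal TypeScript sketch of option objects that satisfy these declarations — not part of this patch; the deep import path and the example values are illustrative assumptions, while the commented defaults are the ones the built indexer and retriever apply when a field is omitted:

import type {
  IndexerFlowOptions,
  PluginOptions,
  RetrieverFlowOptions,
} from 'genkitx-hnsw/lib/interfaces'; // assumed deep import; adjust to the published entry point

// Plugin-level options: only the API key. The built code falls back to the
// GOOGLE_API_KEY environment variable when this is not set.
const pluginOptions: PluginOptions = { apiKey: process.env.GOOGLE_API_KEY };

// Input for the "HNSW Indexer" flow: where to read documents and where to
// write the resulting vector index.
const indexerOptions: IndexerFlowOptions = {
  dataPath: './knowledge-base/**/*.md', // glob pattern; the indexer ignores node_modules/**
  indexOutputPath: './vector-index',
  chunkSize: 12720, // default applied by getSplitter when omitted
  separator: '\n',  // default separator for CharacterTextSplitter
};

// Input for the "HNSW Retriever" flow: a prompt plus the index to search,
// with optional generation parameters.
const retrieverOptions: RetrieverFlowOptions = {
  prompt: 'Summarize the refund policy.',
  indexPath: './vector-index',
  temperature: 0.1,     // defaults mirrored from retrieveResponseWithVector
  maxOutputTokens: 500,
  topK: 1,
};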
diff --git a/plugins/hnsw/lib/retriever/index.d.mts b/plugins/hnsw/lib/retriever/index.d.mts new file mode 100644 index 00000000..4a5b4d37 --- /dev/null +++ b/plugins/hnsw/lib/retriever/index.d.mts @@ -0,0 +1,21 @@ +import { RetrieverFlowOptions, PluginOptions } from '../interfaces/index.mjs'; + +/** + * Copyright 2024 The Fire Company + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +declare const retrieveResponseWithVector: (flowOptions: RetrieverFlowOptions, pluginOptions: PluginOptions) => Promise; + +export { retrieveResponseWithVector }; diff --git a/plugins/hnsw/lib/retriever/index.d.ts b/plugins/hnsw/lib/retriever/index.d.ts new file mode 100644 index 00000000..9c0b1d0e --- /dev/null +++ b/plugins/hnsw/lib/retriever/index.d.ts @@ -0,0 +1,21 @@ +import { RetrieverFlowOptions, PluginOptions } from '../interfaces/index.js'; + +/** + * Copyright 2024 The Fire Company + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +declare const retrieveResponseWithVector: (flowOptions: RetrieverFlowOptions, pluginOptions: PluginOptions) => Promise; + +export { retrieveResponseWithVector }; diff --git a/plugins/hnsw/lib/retriever/index.js b/plugins/hnsw/lib/retriever/index.js new file mode 100644 index 00000000..a5c2c82b --- /dev/null +++ b/plugins/hnsw/lib/retriever/index.js @@ -0,0 +1,92 @@ +"use strict"; +var __defProp = Object.defineProperty; +var __getOwnPropDesc = Object.getOwnPropertyDescriptor; +var __getOwnPropNames = Object.getOwnPropertyNames; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); +}; +var __copyProps = (to, from, except, desc) => { + if (from && typeof from === "object" || typeof from === "function") { + for (let key of __getOwnPropNames(from)) + if (!__hasOwnProp.call(to, key) && key !== except) + __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); + } + return to; +}; +var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); +var retriever_exports = {}; +__export(retriever_exports, { + retrieveResponseWithVector: () => retrieveResponseWithVector +}); +module.exports = __toCommonJS(retriever_exports); +var import_google_genai = require("@langchain/google-genai"); +var import_vectorstores = require("langchain/vectorstores"); +var import_ai = require("@genkit-ai/ai"); +var import_googleai = require("@genkit-ai/googleai"); +const generateHistories = (contexts) => { + const histories = []; + contexts.forEach((context) => { + histories.push({ + role: "user", + content: [{ text: context }] + }); + histories.push({ + role: "model", + content: [{ text: "Understood" }] + }); + }); + return histories; +}; +const initializeStore = async (vectorStorePath, apiKey) => { + const store = await import_vectorstores.HNSWLib.load( + vectorStorePath, + new import_google_genai.GoogleGenerativeAIEmbeddings({ + apiKey + }) + ); + return store; +}; +const getContextBasedOnPrompt = async (store, prompt) => { + const data = await store.similaritySearch(prompt, 1); + const context = []; + data.forEach((item, i) => { + context.push(`${item.pageContent}`); + }); + return context; +}; +const retrieveResponseWithVector = async (flowOptions, pluginOptions) => { + const { + prompt, + indexPath, + temperature, + maxOutputTokens, + topK, + topP, + stopSequences + } = flowOptions; + const { apiKey } = pluginOptions; + const store = await initializeStore(indexPath, apiKey); + const context = await getContextBasedOnPrompt(store, prompt); + const histories = generateHistories(context); + const retrievalConfig = { + temperature: temperature || 0.1, + maxOutputTokens: maxOutputTokens || 500, + topK: topK || 1, + topP: topP || 0, + stopSequences: stopSequences || [] + }; + const promptResult = await (0, import_ai.generate)({ + history: histories, + prompt, + model: import_googleai.geminiPro, + config: retrievalConfig + }); + return promptResult.text(); +}; +// Annotate the CommonJS export names for ESM import in node: +0 && (module.exports = { + retrieveResponseWithVector +}); +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/plugins/hnsw/lib/retriever/index.js.map b/plugins/hnsw/lib/retriever/index.js.map new file mode 100644 index 00000000..7dc2bcd9 --- /dev/null +++ b/plugins/hnsw/lib/retriever/index.js.map @@ -0,0 +1 @@ 
+{"version":3,"sources":["../../src/retriever/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { GoogleGenerativeAIEmbeddings } from '@langchain/google-genai';\nimport { HNSWLib } from 'langchain/vectorstores';\nimport { generate } from '@genkit-ai/ai';\nimport { geminiPro as geminiProModel } from '@genkit-ai/googleai';\n\nimport { RetrieverFlowOptions, PluginOptions } from '../interfaces';\n\nconst generateHistories = (contexts: any[]) => {\n const histories: any[] = [];\n contexts.forEach((context: string) => {\n histories.push({\n role: 'user',\n content: [{ text: context }],\n });\n histories.push({\n role: 'model',\n content: [{ text: 'Understood' }],\n });\n });\n return histories;\n};\n\nconst initializeStore = async (\n vectorStorePath: string,\n apiKey: string | undefined\n) => {\n const store = await HNSWLib.load(\n vectorStorePath,\n new GoogleGenerativeAIEmbeddings({\n apiKey,\n })\n );\n return store;\n};\n\nconst getContextBasedOnPrompt = async (store: HNSWLib, prompt: string) => {\n const data = await store.similaritySearch(prompt, 1);\n const context: string[] = [];\n data.forEach((item: { pageContent: any }, i: any) => {\n context.push(`${item.pageContent}`);\n });\n return context;\n};\n\nconst retrieveResponseWithVector = async (\n flowOptions: RetrieverFlowOptions,\n pluginOptions: PluginOptions\n) => {\n const {\n prompt,\n indexPath,\n temperature,\n maxOutputTokens,\n topK,\n topP,\n stopSequences,\n } = flowOptions;\n const { apiKey } = pluginOptions;\n\n const store = await initializeStore(indexPath, apiKey);\n const context = await getContextBasedOnPrompt(store, prompt);\n const histories = generateHistories(context);\n const retrievalConfig = {\n temperature: temperature || 0.1,\n maxOutputTokens: maxOutputTokens || 500,\n topK: topK || 1,\n topP: topP || 0,\n stopSequences: stopSequences || [],\n };\n const promptResult = await generate({\n history: histories,\n prompt,\n model: geminiProModel,\n config: retrievalConfig,\n });\n\n return promptResult.text();\n};\n\nexport { retrieveResponseWithVector 
};\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAgBA,0BAA6C;AAC7C,0BAAwB;AACxB,gBAAyB;AACzB,sBAA4C;AAI5C,MAAM,oBAAoB,CAAC,aAAoB;AAC7C,QAAM,YAAmB,CAAC;AAC1B,WAAS,QAAQ,CAAC,YAAoB;AACpC,cAAU,KAAK;AAAA,MACb,MAAM;AAAA,MACN,SAAS,CAAC,EAAE,MAAM,QAAQ,CAAC;AAAA,IAC7B,CAAC;AACD,cAAU,KAAK;AAAA,MACb,MAAM;AAAA,MACN,SAAS,CAAC,EAAE,MAAM,aAAa,CAAC;AAAA,IAClC,CAAC;AAAA,EACH,CAAC;AACD,SAAO;AACT;AAEA,MAAM,kBAAkB,OACtB,iBACA,WACG;AACH,QAAM,QAAQ,MAAM,4BAAQ;AAAA,IAC1B;AAAA,IACA,IAAI,iDAA6B;AAAA,MAC/B;AAAA,IACF,CAAC;AAAA,EACH;AACA,SAAO;AACT;AAEA,MAAM,0BAA0B,OAAO,OAAgB,WAAmB;AACxE,QAAM,OAAO,MAAM,MAAM,iBAAiB,QAAQ,CAAC;AACnD,QAAM,UAAoB,CAAC;AAC3B,OAAK,QAAQ,CAAC,MAA4B,MAAW;AACnD,YAAQ,KAAK,GAAG,KAAK,WAAW,EAAE;AAAA,EACpC,CAAC;AACD,SAAO;AACT;AAEA,MAAM,6BAA6B,OACjC,aACA,kBACG;AACH,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,IAAI;AACJ,QAAM,EAAE,OAAO,IAAI;AAEnB,QAAM,QAAQ,MAAM,gBAAgB,WAAW,MAAM;AACrD,QAAM,UAAU,MAAM,wBAAwB,OAAO,MAAM;AAC3D,QAAM,YAAY,kBAAkB,OAAO;AAC3C,QAAM,kBAAkB;AAAA,IACtB,aAAa,eAAe;AAAA,IAC5B,iBAAiB,mBAAmB;AAAA,IACpC,MAAM,QAAQ;AAAA,IACd,MAAM,QAAQ;AAAA,IACd,eAAe,iBAAiB,CAAC;AAAA,EACnC;AACA,QAAM,eAAe,UAAM,oBAAS;AAAA,IAClC,SAAS;AAAA,IACT;AAAA,IACA,OAAO,gBAAAA;AAAA,IACP,QAAQ;AAAA,EACV,CAAC;AAED,SAAO,aAAa,KAAK;AAC3B;","names":["geminiProModel"]} \ No newline at end of file diff --git a/plugins/hnsw/lib/retriever/index.mjs b/plugins/hnsw/lib/retriever/index.mjs new file mode 100644 index 00000000..44364be6 --- /dev/null +++ b/plugins/hnsw/lib/retriever/index.mjs @@ -0,0 +1,68 @@ +import { GoogleGenerativeAIEmbeddings } from "@langchain/google-genai"; +import { HNSWLib } from "langchain/vectorstores"; +import { generate } from "@genkit-ai/ai"; +import { geminiPro as geminiProModel } from "@genkit-ai/googleai"; +const generateHistories = (contexts) => { + const histories = []; + contexts.forEach((context) => { + histories.push({ + role: "user", + content: [{ text: context }] + }); + histories.push({ + role: "model", + content: [{ text: "Understood" }] + }); + }); + return histories; +}; +const initializeStore = async (vectorStorePath, apiKey) => { + const store = await HNSWLib.load( + vectorStorePath, + new GoogleGenerativeAIEmbeddings({ + apiKey + }) + ); + return store; +}; +const getContextBasedOnPrompt = async (store, prompt) => { + const data = await store.similaritySearch(prompt, 1); + const context = []; + data.forEach((item, i) => { + context.push(`${item.pageContent}`); + }); + return context; +}; +const retrieveResponseWithVector = async (flowOptions, pluginOptions) => { + const { + prompt, + indexPath, + temperature, + maxOutputTokens, + topK, + topP, + stopSequences + } = flowOptions; + const { apiKey } = pluginOptions; + const store = await initializeStore(indexPath, apiKey); + const context = await getContextBasedOnPrompt(store, prompt); + const histories = generateHistories(context); + const retrievalConfig = { + temperature: temperature || 0.1, + maxOutputTokens: maxOutputTokens || 500, + topK: topK || 1, + topP: topP || 0, + stopSequences: stopSequences || [] + }; + const promptResult = await generate({ + history: histories, + prompt, + model: geminiProModel, + config: retrievalConfig + }); + return promptResult.text(); +}; +export { + retrieveResponseWithVector +}; +//# sourceMappingURL=index.mjs.map \ No newline at end of file diff --git a/plugins/hnsw/lib/retriever/index.mjs.map b/plugins/hnsw/lib/retriever/index.mjs.map new file mode 100644 index 00000000..fa3512f8 --- /dev/null +++ b/plugins/hnsw/lib/retriever/index.mjs.map @@ -0,0 +1 @@ 
+{"version":3,"sources":["../../src/retriever/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { GoogleGenerativeAIEmbeddings } from '@langchain/google-genai';\nimport { HNSWLib } from 'langchain/vectorstores';\nimport { generate } from '@genkit-ai/ai';\nimport { geminiPro as geminiProModel } from '@genkit-ai/googleai';\n\nimport { RetrieverFlowOptions, PluginOptions } from '../interfaces';\n\nconst generateHistories = (contexts: any[]) => {\n const histories: any[] = [];\n contexts.forEach((context: string) => {\n histories.push({\n role: 'user',\n content: [{ text: context }],\n });\n histories.push({\n role: 'model',\n content: [{ text: 'Understood' }],\n });\n });\n return histories;\n};\n\nconst initializeStore = async (\n vectorStorePath: string,\n apiKey: string | undefined\n) => {\n const store = await HNSWLib.load(\n vectorStorePath,\n new GoogleGenerativeAIEmbeddings({\n apiKey,\n })\n );\n return store;\n};\n\nconst getContextBasedOnPrompt = async (store: HNSWLib, prompt: string) => {\n const data = await store.similaritySearch(prompt, 1);\n const context: string[] = [];\n data.forEach((item: { pageContent: any }, i: any) => {\n context.push(`${item.pageContent}`);\n });\n return context;\n};\n\nconst retrieveResponseWithVector = async (\n flowOptions: RetrieverFlowOptions,\n pluginOptions: PluginOptions\n) => {\n const {\n prompt,\n indexPath,\n temperature,\n maxOutputTokens,\n topK,\n topP,\n stopSequences,\n } = flowOptions;\n const { apiKey } = pluginOptions;\n\n const store = await initializeStore(indexPath, apiKey);\n const context = await getContextBasedOnPrompt(store, prompt);\n const histories = generateHistories(context);\n const retrievalConfig = {\n temperature: temperature || 0.1,\n maxOutputTokens: maxOutputTokens || 500,\n topK: topK || 1,\n topP: topP || 0,\n stopSequences: stopSequences || [],\n };\n const promptResult = await generate({\n history: histories,\n prompt,\n model: geminiProModel,\n config: retrievalConfig,\n });\n\n return promptResult.text();\n};\n\nexport { retrieveResponseWithVector 
};\n"],"mappings":"AAgBA,SAAS,oCAAoC;AAC7C,SAAS,eAAe;AACxB,SAAS,gBAAgB;AACzB,SAAS,aAAa,sBAAsB;AAI5C,MAAM,oBAAoB,CAAC,aAAoB;AAC7C,QAAM,YAAmB,CAAC;AAC1B,WAAS,QAAQ,CAAC,YAAoB;AACpC,cAAU,KAAK;AAAA,MACb,MAAM;AAAA,MACN,SAAS,CAAC,EAAE,MAAM,QAAQ,CAAC;AAAA,IAC7B,CAAC;AACD,cAAU,KAAK;AAAA,MACb,MAAM;AAAA,MACN,SAAS,CAAC,EAAE,MAAM,aAAa,CAAC;AAAA,IAClC,CAAC;AAAA,EACH,CAAC;AACD,SAAO;AACT;AAEA,MAAM,kBAAkB,OACtB,iBACA,WACG;AACH,QAAM,QAAQ,MAAM,QAAQ;AAAA,IAC1B;AAAA,IACA,IAAI,6BAA6B;AAAA,MAC/B;AAAA,IACF,CAAC;AAAA,EACH;AACA,SAAO;AACT;AAEA,MAAM,0BAA0B,OAAO,OAAgB,WAAmB;AACxE,QAAM,OAAO,MAAM,MAAM,iBAAiB,QAAQ,CAAC;AACnD,QAAM,UAAoB,CAAC;AAC3B,OAAK,QAAQ,CAAC,MAA4B,MAAW;AACnD,YAAQ,KAAK,GAAG,KAAK,WAAW,EAAE;AAAA,EACpC,CAAC;AACD,SAAO;AACT;AAEA,MAAM,6BAA6B,OACjC,aACA,kBACG;AACH,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,IAAI;AACJ,QAAM,EAAE,OAAO,IAAI;AAEnB,QAAM,QAAQ,MAAM,gBAAgB,WAAW,MAAM;AACrD,QAAM,UAAU,MAAM,wBAAwB,OAAO,MAAM;AAC3D,QAAM,YAAY,kBAAkB,OAAO;AAC3C,QAAM,kBAAkB;AAAA,IACtB,aAAa,eAAe;AAAA,IAC5B,iBAAiB,mBAAmB;AAAA,IACpC,MAAM,QAAQ;AAAA,IACd,MAAM,QAAQ;AAAA,IACd,eAAe,iBAAiB,CAAC;AAAA,EACnC;AACA,QAAM,eAAe,MAAM,SAAS;AAAA,IAClC,SAAS;AAAA,IACT;AAAA,IACA,OAAO;AAAA,IACP,QAAQ;AAAA,EACV,CAAC;AAED,SAAO,aAAa,KAAK;AAC3B;","names":[]} \ No newline at end of file diff --git a/plugins/hnsw/lib/types/index.d.mts b/plugins/hnsw/lib/types/index.d.mts new file mode 100644 index 00000000..b34bbe49 --- /dev/null +++ b/plugins/hnsw/lib/types/index.d.mts @@ -0,0 +1,21 @@ +import { PluginOptions } from '../interfaces/index.mjs'; + +/** + * Copyright 2024 The Fire Company + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +type PluginExecuteType = (arg0: any, arg1: PluginOptions) => Promise; + +export type { PluginExecuteType }; diff --git a/plugins/hnsw/lib/types/index.d.ts b/plugins/hnsw/lib/types/index.d.ts new file mode 100644 index 00000000..564bbf58 --- /dev/null +++ b/plugins/hnsw/lib/types/index.d.ts @@ -0,0 +1,21 @@ +import { PluginOptions } from '../interfaces/index.js'; + +/** + * Copyright 2024 The Fire Company + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+type PluginExecuteType = (arg0: any, arg1: PluginOptions) => Promise<any>;
+
+export type { PluginExecuteType };
diff --git a/plugins/hnsw/lib/types/index.js b/plugins/hnsw/lib/types/index.js
new file mode 100644
index 00000000..83193e64
--- /dev/null
+++ b/plugins/hnsw/lib/types/index.js
@@ -0,0 +1,17 @@
+"use strict";
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+var types_exports = {};
+module.exports = __toCommonJS(types_exports);
+//# sourceMappingURL=index.js.map
\ No newline at end of file
diff --git a/plugins/hnsw/lib/types/index.js.map b/plugins/hnsw/lib/types/index.js.map
new file mode 100644
index 00000000..1aeab1d4
--- /dev/null
+++ b/plugins/hnsw/lib/types/index.js.map
@@ -0,0 +1 @@
+{"version":3,"sources":["../../src/types/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { PluginOptions } from './../interfaces';\n\nexport type PluginExecuteType = (\n  arg0: any,\n  arg1: PluginOptions\n) => Promise<any>;\n"],"mappings":";;;;;;;;;;;;;;AAAA;AAAA;","names":[]}
\ No newline at end of file
diff --git a/plugins/hnsw/lib/types/index.mjs b/plugins/hnsw/lib/types/index.mjs
new file mode 100644
index 00000000..4bf9eabc
--- /dev/null
+++ b/plugins/hnsw/lib/types/index.mjs
@@ -0,0 +1 @@
+//# sourceMappingURL=index.mjs.map
\ No newline at end of file
diff --git a/plugins/hnsw/lib/types/index.mjs.map b/plugins/hnsw/lib/types/index.mjs.map
new file mode 100644
index 00000000..84c51b28
--- /dev/null
+++ b/plugins/hnsw/lib/types/index.mjs.map
@@ -0,0 +1 @@
+{"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]}
\ No newline at end of file
diff --git a/plugins/hnsw/lib/utilities/index.d.mts b/plugins/hnsw/lib/utilities/index.d.mts
new file mode 100644
index 00000000..19e11dba
--- /dev/null
+++ b/plugins/hnsw/lib/utilities/index.d.mts
@@ -0,0 +1,23 @@
+import { StatusName } from '@genkit-ai/core/lib/statusTypes';
+import { PluginOptions } from '../interfaces/index.mjs';
+
+/**
+ * Copyright 2024 The Fire Company
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+declare const throwError: (status: StatusName, message: string) => never;
+declare const checkApiKey: (pluginOptions: PluginOptions) => undefined;
+
+export { checkApiKey, throwError };
diff --git a/plugins/hnsw/lib/utilities/index.d.ts b/plugins/hnsw/lib/utilities/index.d.ts
new file mode 100644
index 00000000..45c31099
--- /dev/null
+++ b/plugins/hnsw/lib/utilities/index.d.ts
@@ -0,0 +1,23 @@
+import { StatusName } from '@genkit-ai/core/lib/statusTypes';
+import { PluginOptions } from '../interfaces/index.js';
+
+/**
+ * Copyright 2024 The Fire Company
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+declare const throwError: (status: StatusName, message: string) => never;
+declare const checkApiKey: (pluginOptions: PluginOptions) => undefined;
+
+export { checkApiKey, throwError };
diff --git a/plugins/hnsw/lib/utilities/index.js b/plugins/hnsw/lib/utilities/index.js
new file mode 100644
index 00000000..f31dfe4d
--- /dev/null
+++ b/plugins/hnsw/lib/utilities/index.js
@@ -0,0 +1,43 @@
+"use strict";
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+var utilities_exports = {};
+__export(utilities_exports, {
+  checkApiKey: () => checkApiKey,
+  throwError: () => throwError
+});
+module.exports = __toCommonJS(utilities_exports);
+var import_core = require("@genkit-ai/core");
+var import_constants = require("../constants");
+const throwError = (status, message) => {
+  throw new import_core.GenkitError({
+    status,
+    message
+  });
+};
+const checkApiKey = (pluginOptions) => {
+  const { apiKey } = pluginOptions;
+  if (!apiKey)
+    return throwError(import_constants.ERROR_INVALID_ARGUMENT, import_constants.ERROR_NO_API_KEY);
+};
+// Annotate the CommonJS export names for ESM import in node:
+0 && (module.exports = {
+  checkApiKey,
+  throwError
+});
+//# sourceMappingURL=index.js.map
\ No newline at end of file
diff --git
a/plugins/hnsw/lib/utilities/index.js.map b/plugins/hnsw/lib/utilities/index.js.map new file mode 100644 index 00000000..3babebdf --- /dev/null +++ b/plugins/hnsw/lib/utilities/index.js.map @@ -0,0 +1 @@ +{"version":3,"sources":["../../src/utilities/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { GenkitError } from '@genkit-ai/core';\nimport { StatusName } from '@genkit-ai/core/lib/statusTypes';\n\nimport { PluginOptions } from '../interfaces';\n\nimport { ERROR_INVALID_ARGUMENT, ERROR_NO_API_KEY } from '../constants';\n\nexport const throwError = (status: StatusName, message: string) => {\n throw new GenkitError({\n status,\n message,\n });\n};\n\nexport const checkApiKey = (pluginOptions: PluginOptions) => {\n const { apiKey } = pluginOptions;\n if (!apiKey) return throwError(ERROR_INVALID_ARGUMENT, ERROR_NO_API_KEY);\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAgBA,kBAA4B;AAK5B,uBAAyD;AAElD,MAAM,aAAa,CAAC,QAAoB,YAAoB;AACjE,QAAM,IAAI,wBAAY;AAAA,IACpB;AAAA,IACA;AAAA,EACF,CAAC;AACH;AAEO,MAAM,cAAc,CAAC,kBAAiC;AAC3D,QAAM,EAAE,OAAO,IAAI;AACnB,MAAI,CAAC;AAAQ,WAAO,WAAW,yCAAwB,iCAAgB;AACzE;","names":[]} \ No newline at end of file diff --git a/plugins/hnsw/lib/utilities/index.mjs b/plugins/hnsw/lib/utilities/index.mjs new file mode 100644 index 00000000..0015deec --- /dev/null +++ b/plugins/hnsw/lib/utilities/index.mjs @@ -0,0 +1,18 @@ +import { GenkitError } from "@genkit-ai/core"; +import { ERROR_INVALID_ARGUMENT, ERROR_NO_API_KEY } from "../constants"; +const throwError = (status, message) => { + throw new GenkitError({ + status, + message + }); +}; +const checkApiKey = (pluginOptions) => { + const { apiKey } = pluginOptions; + if (!apiKey) + return throwError(ERROR_INVALID_ARGUMENT, ERROR_NO_API_KEY); +}; +export { + checkApiKey, + throwError +}; +//# sourceMappingURL=index.mjs.map \ No newline at end of file diff --git a/plugins/hnsw/lib/utilities/index.mjs.map b/plugins/hnsw/lib/utilities/index.mjs.map new file mode 100644 index 00000000..bb385390 --- /dev/null +++ b/plugins/hnsw/lib/utilities/index.mjs.map @@ -0,0 +1 @@ +{"version":3,"sources":["../../src/utilities/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { GenkitError } from '@genkit-ai/core';\nimport { StatusName } from '@genkit-ai/core/lib/statusTypes';\n\nimport { PluginOptions } from '../interfaces';\n\nimport 
{ ERROR_INVALID_ARGUMENT, ERROR_NO_API_KEY } from '../constants';\n\nexport const throwError = (status: StatusName, message: string) => {\n throw new GenkitError({\n status,\n message,\n });\n};\n\nexport const checkApiKey = (pluginOptions: PluginOptions) => {\n const { apiKey } = pluginOptions;\n if (!apiKey) return throwError(ERROR_INVALID_ARGUMENT, ERROR_NO_API_KEY);\n};\n"],"mappings":"AAgBA,SAAS,mBAAmB;AAK5B,SAAS,wBAAwB,wBAAwB;AAElD,MAAM,aAAa,CAAC,QAAoB,YAAoB;AACjE,QAAM,IAAI,YAAY;AAAA,IACpB;AAAA,IACA;AAAA,EACF,CAAC;AACH;AAEO,MAAM,cAAc,CAAC,kBAAiC;AAC3D,QAAM,EAAE,OAAO,IAAI;AACnB,MAAI,CAAC;AAAQ,WAAO,WAAW,wBAAwB,gBAAgB;AACzE;","names":[]} \ No newline at end of file diff --git a/plugins/openai/.gitignore b/plugins/openai/.gitignore index d83aca04..f04ecb97 100644 --- a/plugins/openai/.gitignore +++ b/plugins/openai/.gitignore @@ -1,3 +1,3 @@ -lib/ +# lib/ node_modules/ coverage/ diff --git a/plugins/openai/lib/chunk-WFI2LP4G.mjs b/plugins/openai/lib/chunk-WFI2LP4G.mjs new file mode 100644 index 00000000..e9098ac9 --- /dev/null +++ b/plugins/openai/lib/chunk-WFI2LP4G.mjs @@ -0,0 +1,51 @@ +var __defProp = Object.defineProperty; +var __defProps = Object.defineProperties; +var __getOwnPropDescs = Object.getOwnPropertyDescriptors; +var __getOwnPropSymbols = Object.getOwnPropertySymbols; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __propIsEnum = Object.prototype.propertyIsEnumerable; +var __knownSymbol = (name, symbol) => { + return (symbol = Symbol[name]) ? symbol : Symbol.for("Symbol." + name); +}; +var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value; +var __spreadValues = (a, b) => { + for (var prop in b || (b = {})) + if (__hasOwnProp.call(b, prop)) + __defNormalProp(a, prop, b[prop]); + if (__getOwnPropSymbols) + for (var prop of __getOwnPropSymbols(b)) { + if (__propIsEnum.call(b, prop)) + __defNormalProp(a, prop, b[prop]); + } + return a; +}; +var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b)); +var __async = (__this, __arguments, generator) => { + return new Promise((resolve, reject) => { + var fulfilled = (value) => { + try { + step(generator.next(value)); + } catch (e) { + reject(e); + } + }; + var rejected = (value) => { + try { + step(generator.throw(value)); + } catch (e) { + reject(e); + } + }; + var step = (x) => x.done ? resolve(x.value) : Promise.resolve(x.value).then(fulfilled, rejected); + step((generator = generator.apply(__this, __arguments)).next()); + }); +}; +var __forAwait = (obj, it, method) => (it = obj[__knownSymbol("asyncIterator")]) ? 
it.call(obj) : (obj = obj[__knownSymbol("iterator")](), it = {}, method = (key, fn) => (fn = obj[key]) && (it[key] = (arg) => new Promise((yes, no, done) => (arg = fn.call(obj, arg), done = arg.done, Promise.resolve(arg.value).then((value) => yes({ value, done }), no)))), method("next"), method("return"), it); + +export { + __spreadValues, + __spreadProps, + __async, + __forAwait +}; +//# sourceMappingURL=chunk-WFI2LP4G.mjs.map \ No newline at end of file diff --git a/plugins/openai/lib/chunk-WFI2LP4G.mjs.map b/plugins/openai/lib/chunk-WFI2LP4G.mjs.map new file mode 100644 index 00000000..84c51b28 --- /dev/null +++ b/plugins/openai/lib/chunk-WFI2LP4G.mjs.map @@ -0,0 +1 @@ +{"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]} \ No newline at end of file diff --git a/plugins/openai/lib/dalle.d.mts b/plugins/openai/lib/dalle.d.mts new file mode 100644 index 00000000..eb564b82 --- /dev/null +++ b/plugins/openai/lib/dalle.d.mts @@ -0,0 +1,84 @@ +import * as _genkit_ai_ai_model from '@genkit-ai/ai/model'; +import { ModelAction } from '@genkit-ai/ai/model'; +import OpenAI from 'openai'; +import { z } from 'zod'; + +declare const DallE3ConfigSchema: z.ZodObject; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + size: z.ZodOptional>; + style: z.ZodOptional>; + user: z.ZodOptional; + quality: z.ZodOptional>; + response_format: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + size?: "1024x1024" | "1792x1024" | "1024x1792" | undefined; + style?: "vivid" | "natural" | undefined; + user?: string | undefined; + quality?: "standard" | "hd" | undefined; + response_format?: "b64_json" | "url" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; +}, { + size?: "1024x1024" | "1792x1024" | "1024x1792" | undefined; + style?: "vivid" | "natural" | undefined; + user?: string | undefined; + quality?: "standard" | "hd" | undefined; + response_format?: "b64_json" | "url" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; +}>; +declare const dallE3: _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + size: z.ZodOptional>; + style: z.ZodOptional>; + user: z.ZodOptional; + quality: z.ZodOptional>; + response_format: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + size?: "1024x1024" | "1792x1024" | "1024x1792" | undefined; + style?: "vivid" | "natural" | undefined; + user?: string | undefined; + quality?: "standard" | "hd" | undefined; + response_format?: "b64_json" | "url" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; +}, { + size?: "1024x1024" | "1792x1024" | "1024x1792" | undefined; + style?: "vivid" | "natural" | undefined; + user?: string | undefined; + quality?: "standard" | "hd" | undefined; + response_format?: "b64_json" | "url" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | 
undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; +}>>; +declare function dallE3Model(client: OpenAI): ModelAction; + +export { DallE3ConfigSchema, dallE3, dallE3Model }; diff --git a/plugins/openai/lib/dalle.d.ts b/plugins/openai/lib/dalle.d.ts new file mode 100644 index 00000000..eb564b82 --- /dev/null +++ b/plugins/openai/lib/dalle.d.ts @@ -0,0 +1,84 @@ +import * as _genkit_ai_ai_model from '@genkit-ai/ai/model'; +import { ModelAction } from '@genkit-ai/ai/model'; +import OpenAI from 'openai'; +import { z } from 'zod'; + +declare const DallE3ConfigSchema: z.ZodObject; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + size: z.ZodOptional>; + style: z.ZodOptional>; + user: z.ZodOptional; + quality: z.ZodOptional>; + response_format: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + size?: "1024x1024" | "1792x1024" | "1024x1792" | undefined; + style?: "vivid" | "natural" | undefined; + user?: string | undefined; + quality?: "standard" | "hd" | undefined; + response_format?: "b64_json" | "url" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; +}, { + size?: "1024x1024" | "1792x1024" | "1024x1792" | undefined; + style?: "vivid" | "natural" | undefined; + user?: string | undefined; + quality?: "standard" | "hd" | undefined; + response_format?: "b64_json" | "url" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; +}>; +declare const dallE3: _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + size: z.ZodOptional>; + style: z.ZodOptional>; + user: z.ZodOptional; + quality: z.ZodOptional>; + response_format: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + size?: "1024x1024" | "1792x1024" | "1024x1792" | undefined; + style?: "vivid" | "natural" | undefined; + user?: string | undefined; + quality?: "standard" | "hd" | undefined; + response_format?: "b64_json" | "url" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; +}, { + size?: "1024x1024" | "1792x1024" | "1024x1792" | undefined; + style?: "vivid" | "natural" | undefined; + user?: string | undefined; + quality?: "standard" | "hd" | undefined; + response_format?: "b64_json" | "url" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; +}>>; +declare function dallE3Model(client: OpenAI): ModelAction; + +export { DallE3ConfigSchema, dallE3, dallE3Model }; diff --git a/plugins/openai/lib/dalle.js b/plugins/openai/lib/dalle.js new file mode 100644 index 00000000..71c6a4da --- /dev/null +++ b/plugins/openai/lib/dalle.js @@ -0,0 +1,146 @@ +"use strict"; +var __defProp = Object.defineProperty; +var __defProps = Object.defineProperties; +var __getOwnPropDesc = Object.getOwnPropertyDescriptor; +var __getOwnPropDescs = 
Object.getOwnPropertyDescriptors; +var __getOwnPropNames = Object.getOwnPropertyNames; +var __getOwnPropSymbols = Object.getOwnPropertySymbols; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __propIsEnum = Object.prototype.propertyIsEnumerable; +var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value; +var __spreadValues = (a, b) => { + for (var prop in b || (b = {})) + if (__hasOwnProp.call(b, prop)) + __defNormalProp(a, prop, b[prop]); + if (__getOwnPropSymbols) + for (var prop of __getOwnPropSymbols(b)) { + if (__propIsEnum.call(b, prop)) + __defNormalProp(a, prop, b[prop]); + } + return a; +}; +var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b)); +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); +}; +var __copyProps = (to, from, except, desc) => { + if (from && typeof from === "object" || typeof from === "function") { + for (let key of __getOwnPropNames(from)) + if (!__hasOwnProp.call(to, key) && key !== except) + __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); + } + return to; +}; +var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); +var __async = (__this, __arguments, generator) => { + return new Promise((resolve, reject) => { + var fulfilled = (value) => { + try { + step(generator.next(value)); + } catch (e) { + reject(e); + } + }; + var rejected = (value) => { + try { + step(generator.throw(value)); + } catch (e) { + reject(e); + } + }; + var step = (x) => x.done ? resolve(x.value) : Promise.resolve(x.value).then(fulfilled, rejected); + step((generator = generator.apply(__this, __arguments)).next()); + }); +}; +var dalle_exports = {}; +__export(dalle_exports, { + DallE3ConfigSchema: () => DallE3ConfigSchema, + dallE3: () => dallE3, + dallE3Model: () => dallE3Model +}); +module.exports = __toCommonJS(dalle_exports); +var import_ai = require("@genkit-ai/ai"); +var import_model = require("@genkit-ai/ai/model"); +var import_zod = require("zod"); +const DallE3ConfigSchema = import_model.GenerationCommonConfigSchema.extend({ + size: import_zod.z.enum(["1024x1024", "1792x1024", "1024x1792"]).optional(), + style: import_zod.z.enum(["vivid", "natural"]).optional(), + user: import_zod.z.string().optional(), + quality: import_zod.z.enum(["standard", "hd"]).optional(), + response_format: import_zod.z.enum(["b64_json", "url"]).optional() +}); +const dallE3 = (0, import_model.modelRef)({ + name: "openai/dall-e-3", + info: { + label: "OpenAI - DALL-E 3", + supports: { + media: false, + output: ["media"], + multiturn: false, + systemRole: false, + tools: false + } + }, + configSchema: DallE3ConfigSchema +}); +function toDallE3Request(request) { + var _a, _b, _c, _d, _e; + const options = { + model: "dall-e-3", + prompt: new import_ai.Message(request.messages[0]).text(), + n: request.candidates || 1, + size: (_a = request.config) == null ? void 0 : _a.size, + style: (_b = request.config) == null ? void 0 : _b.style, + user: (_c = request.config) == null ? void 0 : _c.user, + quality: (_d = request.config) == null ? void 0 : _d.quality, + response_format: ((_e = request.config) == null ? 
void 0 : _e.response_format) || "b64_json" + }; + for (const k in options) { + if (options[k] === void 0) { + delete options[k]; + } + } + return options; +} +function toGenerateResponse(result) { + const candidates = result.data.map( + (image, index) => ({ + index, + finishReason: "stop", + custom: { revisedPrompt: image.revised_prompt }, + message: { + role: "model", + content: [ + { + media: { + contentType: "image/png", + url: image.url || `data:image/png;base64,${image.b64_json}` + } + } + ] + } + }) + ); + return { candidates }; +} +function dallE3Model(client) { + return (0, import_model.defineModel)( + __spreadProps(__spreadValues({ + name: dallE3.name + }, dallE3.info), { + configSchema: dallE3.configSchema + }), + (request) => __async(this, null, function* () { + const result = yield client.images.generate(toDallE3Request(request)); + return toGenerateResponse(result); + }) + ); +} +// Annotate the CommonJS export names for ESM import in node: +0 && (module.exports = { + DallE3ConfigSchema, + dallE3, + dallE3Model +}); +//# sourceMappingURL=dalle.js.map \ No newline at end of file diff --git a/plugins/openai/lib/dalle.js.map b/plugins/openai/lib/dalle.js.map new file mode 100644 index 00000000..0159b0c9 --- /dev/null +++ b/plugins/openai/lib/dalle.js.map @@ -0,0 +1 @@ +{"version":3,"sources":["../src/dalle.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Message } from '@genkit-ai/ai';\nimport {\n GenerationCommonConfigSchema,\n defineModel,\n modelRef,\n type GenerateRequest,\n type GenerateResponseData,\n type ModelAction,\n} from '@genkit-ai/ai/model';\nimport OpenAI from 'openai';\nimport {\n type ImageGenerateParams,\n type ImagesResponse,\n} from 'openai/resources/images.mjs';\nimport { z } from 'zod';\n\nexport const DallE3ConfigSchema = GenerationCommonConfigSchema.extend({\n size: z.enum(['1024x1024', '1792x1024', '1024x1792']).optional(),\n style: z.enum(['vivid', 'natural']).optional(),\n user: z.string().optional(),\n quality: z.enum(['standard', 'hd']).optional(),\n response_format: z.enum(['b64_json', 'url']).optional(),\n});\n\nexport const dallE3 = modelRef({\n name: 'openai/dall-e-3',\n info: {\n label: 'OpenAI - DALL-E 3',\n supports: {\n media: false,\n output: ['media'],\n multiturn: false,\n systemRole: false,\n tools: false,\n },\n },\n configSchema: DallE3ConfigSchema,\n});\n\nfunction toDallE3Request(\n request: GenerateRequest\n): ImageGenerateParams {\n const options = {\n model: 'dall-e-3',\n prompt: new Message(request.messages[0]).text(),\n n: request.candidates || 1,\n size: request.config?.size,\n style: request.config?.style,\n user: request.config?.user,\n quality: request.config?.quality,\n response_format: request.config?.response_format || 'b64_json',\n };\n for (const k in options) {\n if (options[k] === undefined) {\n delete options[k];\n }\n }\n return options;\n}\n\nfunction toGenerateResponse(result: ImagesResponse): GenerateResponseData {\n const 
candidates: GenerateResponseData['candidates'] = result.data.map(\n (image, index) => ({\n index: index,\n finishReason: 'stop',\n custom: { revisedPrompt: image.revised_prompt },\n message: {\n role: 'model',\n content: [\n {\n media: {\n contentType: 'image/png',\n url: image.url || `data:image/png;base64,${image.b64_json}`,\n },\n },\n ],\n },\n })\n );\n return { candidates };\n}\n\nexport function dallE3Model(\n client: OpenAI\n): ModelAction {\n return defineModel(\n {\n name: dallE3.name,\n ...dallE3.info,\n configSchema: dallE3.configSchema,\n },\n async (request) => {\n const result = await client.images.generate(toDallE3Request(request));\n return toGenerateResponse(result);\n }\n );\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAgBA,gBAAwB;AACxB,mBAOO;AAMP,iBAAkB;AAEX,MAAM,qBAAqB,0CAA6B,OAAO;AAAA,EACpE,MAAM,aAAE,KAAK,CAAC,aAAa,aAAa,WAAW,CAAC,EAAE,SAAS;AAAA,EAC/D,OAAO,aAAE,KAAK,CAAC,SAAS,SAAS,CAAC,EAAE,SAAS;AAAA,EAC7C,MAAM,aAAE,OAAO,EAAE,SAAS;AAAA,EAC1B,SAAS,aAAE,KAAK,CAAC,YAAY,IAAI,CAAC,EAAE,SAAS;AAAA,EAC7C,iBAAiB,aAAE,KAAK,CAAC,YAAY,KAAK,CAAC,EAAE,SAAS;AACxD,CAAC;AAEM,MAAM,aAAS,uBAAS;AAAA,EAC7B,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,OAAO;AAAA,IACP,UAAU;AAAA,MACR,OAAO;AAAA,MACP,QAAQ,CAAC,OAAO;AAAA,MAChB,WAAW;AAAA,MACX,YAAY;AAAA,MACZ,OAAO;AAAA,IACT;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAED,SAAS,gBACP,SACqB;AAzDvB;AA0DE,QAAM,UAAU;AAAA,IACd,OAAO;AAAA,IACP,QAAQ,IAAI,kBAAQ,QAAQ,SAAS,CAAC,CAAC,EAAE,KAAK;AAAA,IAC9C,GAAG,QAAQ,cAAc;AAAA,IACzB,OAAM,aAAQ,WAAR,mBAAgB;AAAA,IACtB,QAAO,aAAQ,WAAR,mBAAgB;AAAA,IACvB,OAAM,aAAQ,WAAR,mBAAgB;AAAA,IACtB,UAAS,aAAQ,WAAR,mBAAgB;AAAA,IACzB,mBAAiB,aAAQ,WAAR,mBAAgB,oBAAmB;AAAA,EACtD;AACA,aAAW,KAAK,SAAS;AACvB,QAAI,QAAQ,CAAC,MAAM,QAAW;AAC5B,aAAO,QAAQ,CAAC;AAAA,IAClB;AAAA,EACF;AACA,SAAO;AACT;AAEA,SAAS,mBAAmB,QAA8C;AACxE,QAAM,aAAiD,OAAO,KAAK;AAAA,IACjE,CAAC,OAAO,WAAW;AAAA,MACjB;AAAA,MACA,cAAc;AAAA,MACd,QAAQ,EAAE,eAAe,MAAM,eAAe;AAAA,MAC9C,SAAS;AAAA,QACP,MAAM;AAAA,QACN,SAAS;AAAA,UACP;AAAA,YACE,OAAO;AAAA,cACL,aAAa;AAAA,cACb,KAAK,MAAM,OAAO,yBAAyB,MAAM,QAAQ;AAAA,YAC3D;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACA,SAAO,EAAE,WAAW;AACtB;AAEO,SAAS,YACd,QACwC;AACxC,aAAO;AAAA,IACL;AAAA,MACE,MAAM,OAAO;AAAA,OACV,OAAO,OAFZ;AAAA,MAGE,cAAc,OAAO;AAAA,IACvB;AAAA,IACA,CAAO,YAAY;AACjB,YAAM,SAAS,MAAM,OAAO,OAAO,SAAS,gBAAgB,OAAO,CAAC;AACpE,aAAO,mBAAmB,MAAM;AAAA,IAClC;AAAA,EACF;AACF;","names":[]} \ No newline at end of file diff --git a/plugins/openai/lib/dalle.mjs b/plugins/openai/lib/dalle.mjs new file mode 100644 index 00000000..baecd004 --- /dev/null +++ b/plugins/openai/lib/dalle.mjs @@ -0,0 +1,92 @@ +import { + __async, + __spreadProps, + __spreadValues +} from "./chunk-WFI2LP4G.mjs"; +import { Message } from "@genkit-ai/ai"; +import { + GenerationCommonConfigSchema, + defineModel, + modelRef +} from "@genkit-ai/ai/model"; +import { z } from "zod"; +const DallE3ConfigSchema = GenerationCommonConfigSchema.extend({ + size: z.enum(["1024x1024", "1792x1024", "1024x1792"]).optional(), + style: z.enum(["vivid", "natural"]).optional(), + user: z.string().optional(), + quality: z.enum(["standard", "hd"]).optional(), + response_format: z.enum(["b64_json", "url"]).optional() +}); +const dallE3 = modelRef({ + name: "openai/dall-e-3", + info: { + label: "OpenAI - DALL-E 3", + supports: { + media: false, + output: ["media"], + multiturn: false, + systemRole: false, + tools: false + } + }, + configSchema: DallE3ConfigSchema +}); +function toDallE3Request(request) { + var _a, _b, _c, _d, _e; + const options = { + model: "dall-e-3", 
+ prompt: new Message(request.messages[0]).text(), + n: request.candidates || 1, + size: (_a = request.config) == null ? void 0 : _a.size, + style: (_b = request.config) == null ? void 0 : _b.style, + user: (_c = request.config) == null ? void 0 : _c.user, + quality: (_d = request.config) == null ? void 0 : _d.quality, + response_format: ((_e = request.config) == null ? void 0 : _e.response_format) || "b64_json" + }; + for (const k in options) { + if (options[k] === void 0) { + delete options[k]; + } + } + return options; +} +function toGenerateResponse(result) { + const candidates = result.data.map( + (image, index) => ({ + index, + finishReason: "stop", + custom: { revisedPrompt: image.revised_prompt }, + message: { + role: "model", + content: [ + { + media: { + contentType: "image/png", + url: image.url || `data:image/png;base64,${image.b64_json}` + } + } + ] + } + }) + ); + return { candidates }; +} +function dallE3Model(client) { + return defineModel( + __spreadProps(__spreadValues({ + name: dallE3.name + }, dallE3.info), { + configSchema: dallE3.configSchema + }), + (request) => __async(this, null, function* () { + const result = yield client.images.generate(toDallE3Request(request)); + return toGenerateResponse(result); + }) + ); +} +export { + DallE3ConfigSchema, + dallE3, + dallE3Model +}; +//# sourceMappingURL=dalle.mjs.map \ No newline at end of file diff --git a/plugins/openai/lib/dalle.mjs.map b/plugins/openai/lib/dalle.mjs.map new file mode 100644 index 00000000..c3d29f05 --- /dev/null +++ b/plugins/openai/lib/dalle.mjs.map @@ -0,0 +1 @@ +{"version":3,"sources":["../src/dalle.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Message } from '@genkit-ai/ai';\nimport {\n GenerationCommonConfigSchema,\n defineModel,\n modelRef,\n type GenerateRequest,\n type GenerateResponseData,\n type ModelAction,\n} from '@genkit-ai/ai/model';\nimport OpenAI from 'openai';\nimport {\n type ImageGenerateParams,\n type ImagesResponse,\n} from 'openai/resources/images.mjs';\nimport { z } from 'zod';\n\nexport const DallE3ConfigSchema = GenerationCommonConfigSchema.extend({\n size: z.enum(['1024x1024', '1792x1024', '1024x1792']).optional(),\n style: z.enum(['vivid', 'natural']).optional(),\n user: z.string().optional(),\n quality: z.enum(['standard', 'hd']).optional(),\n response_format: z.enum(['b64_json', 'url']).optional(),\n});\n\nexport const dallE3 = modelRef({\n name: 'openai/dall-e-3',\n info: {\n label: 'OpenAI - DALL-E 3',\n supports: {\n media: false,\n output: ['media'],\n multiturn: false,\n systemRole: false,\n tools: false,\n },\n },\n configSchema: DallE3ConfigSchema,\n});\n\nfunction toDallE3Request(\n request: GenerateRequest\n): ImageGenerateParams {\n const options = {\n model: 'dall-e-3',\n prompt: new Message(request.messages[0]).text(),\n n: request.candidates || 1,\n size: request.config?.size,\n style: request.config?.style,\n user: request.config?.user,\n quality: 
request.config?.quality,\n response_format: request.config?.response_format || 'b64_json',\n };\n for (const k in options) {\n if (options[k] === undefined) {\n delete options[k];\n }\n }\n return options;\n}\n\nfunction toGenerateResponse(result: ImagesResponse): GenerateResponseData {\n const candidates: GenerateResponseData['candidates'] = result.data.map(\n (image, index) => ({\n index: index,\n finishReason: 'stop',\n custom: { revisedPrompt: image.revised_prompt },\n message: {\n role: 'model',\n content: [\n {\n media: {\n contentType: 'image/png',\n url: image.url || `data:image/png;base64,${image.b64_json}`,\n },\n },\n ],\n },\n })\n );\n return { candidates };\n}\n\nexport function dallE3Model(\n client: OpenAI\n): ModelAction {\n return defineModel(\n {\n name: dallE3.name,\n ...dallE3.info,\n configSchema: dallE3.configSchema,\n },\n async (request) => {\n const result = await client.images.generate(toDallE3Request(request));\n return toGenerateResponse(result);\n }\n );\n}\n"],"mappings":";;;;;AAgBA,SAAS,eAAe;AACxB;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,OAIK;AAMP,SAAS,SAAS;AAEX,MAAM,qBAAqB,6BAA6B,OAAO;AAAA,EACpE,MAAM,EAAE,KAAK,CAAC,aAAa,aAAa,WAAW,CAAC,EAAE,SAAS;AAAA,EAC/D,OAAO,EAAE,KAAK,CAAC,SAAS,SAAS,CAAC,EAAE,SAAS;AAAA,EAC7C,MAAM,EAAE,OAAO,EAAE,SAAS;AAAA,EAC1B,SAAS,EAAE,KAAK,CAAC,YAAY,IAAI,CAAC,EAAE,SAAS;AAAA,EAC7C,iBAAiB,EAAE,KAAK,CAAC,YAAY,KAAK,CAAC,EAAE,SAAS;AACxD,CAAC;AAEM,MAAM,SAAS,SAAS;AAAA,EAC7B,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,OAAO;AAAA,IACP,UAAU;AAAA,MACR,OAAO;AAAA,MACP,QAAQ,CAAC,OAAO;AAAA,MAChB,WAAW;AAAA,MACX,YAAY;AAAA,MACZ,OAAO;AAAA,IACT;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAED,SAAS,gBACP,SACqB;AAzDvB;AA0DE,QAAM,UAAU;AAAA,IACd,OAAO;AAAA,IACP,QAAQ,IAAI,QAAQ,QAAQ,SAAS,CAAC,CAAC,EAAE,KAAK;AAAA,IAC9C,GAAG,QAAQ,cAAc;AAAA,IACzB,OAAM,aAAQ,WAAR,mBAAgB;AAAA,IACtB,QAAO,aAAQ,WAAR,mBAAgB;AAAA,IACvB,OAAM,aAAQ,WAAR,mBAAgB;AAAA,IACtB,UAAS,aAAQ,WAAR,mBAAgB;AAAA,IACzB,mBAAiB,aAAQ,WAAR,mBAAgB,oBAAmB;AAAA,EACtD;AACA,aAAW,KAAK,SAAS;AACvB,QAAI,QAAQ,CAAC,MAAM,QAAW;AAC5B,aAAO,QAAQ,CAAC;AAAA,IAClB;AAAA,EACF;AACA,SAAO;AACT;AAEA,SAAS,mBAAmB,QAA8C;AACxE,QAAM,aAAiD,OAAO,KAAK;AAAA,IACjE,CAAC,OAAO,WAAW;AAAA,MACjB;AAAA,MACA,cAAc;AAAA,MACd,QAAQ,EAAE,eAAe,MAAM,eAAe;AAAA,MAC9C,SAAS;AAAA,QACP,MAAM;AAAA,QACN,SAAS;AAAA,UACP;AAAA,YACE,OAAO;AAAA,cACL,aAAa;AAAA,cACb,KAAK,MAAM,OAAO,yBAAyB,MAAM,QAAQ;AAAA,YAC3D;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACA,SAAO,EAAE,WAAW;AACtB;AAEO,SAAS,YACd,QACwC;AACxC,SAAO;AAAA,IACL;AAAA,MACE,MAAM,OAAO;AAAA,OACV,OAAO,OAFZ;AAAA,MAGE,cAAc,OAAO;AAAA,IACvB;AAAA,IACA,CAAO,YAAY;AACjB,YAAM,SAAS,MAAM,OAAO,OAAO,SAAS,gBAAgB,OAAO,CAAC;AACpE,aAAO,mBAAmB,MAAM;AAAA,IAClC;AAAA,EACF;AACF;","names":[]} \ No newline at end of file diff --git a/plugins/openai/lib/embedder-DTnK2FJN.d.ts b/plugins/openai/lib/embedder-DTnK2FJN.d.ts new file mode 100644 index 00000000..5d11c5ab --- /dev/null +++ b/plugins/openai/lib/embedder-DTnK2FJN.d.ts @@ -0,0 +1,157 @@ +import * as _genkit_ai_ai_embedder from '@genkit-ai/ai/embedder'; +import { z } from 'zod'; +import { Plugin } from '@genkit-ai/core'; +import './dalle.js'; +import './gpt.js'; +import './tts.js'; +import './whisper.js'; + +/** + * Copyright 2024 The Fire Company + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +interface PluginOptions { + apiKey?: string; +} +/** + * This module provides an interface to the OpenAI models through the Genkit + * plugin system. It allows users to interact with various models by providing + * an API key and optional configuration. + * + * The main export is the `openai` plugin, which can be configured with an API + * key either directly or through environment variables. It initializes the + * OpenAI client and makes available the models for use. + * + * Exports: + * - gpt4o: Reference to the GPT-4o model. + * - gpt4oMini: Reference to the GPT-4o-mini model. + * - gpt4Turbo: Reference to the GPT-4 Turbo model. + * - gpt4Vision: Reference to the GPT-4 Vision model. + * - gpt4: Reference to the GPT-4 model. + * - gpt35Turbo: Reference to the GPT-3.5 Turbo model. + * - dallE3: Reference to the DALL-E 3 model. + * - tts1: Reference to the Text-to-speech 1 model. + * - tts1Hd: Reference to the Text-to-speech 1 HD model. + * - whisper: Reference to the Whisper model. + * - textEmbedding3Large: Reference to the Text Embedding Large model. + * - textEmbedding3Small: Reference to the Text Embedding Small model. + * - textEmbeddingAda002: Reference to the Ada model. + * - openai: The main plugin function to interact with OpenAI. + * + * Usage: + * To use the models, initialize the openai plugin inside `configureGenkit` and + * pass the configuration options. If no API key is provided in the options, the + * environment variable `OPENAI_API_KEY` must be set. + * + * Example: + * ``` + * import openai from 'genkitx-openai'; + * + * export default configureGenkit({ + * plugins: [ + * openai({ apiKey: 'your-api-key' }) + * ... 
// other plugins + * ] + * }); + * ``` + */ +declare const openAI: Plugin<[PluginOptions] | []>; + +declare const TextEmbeddingConfigSchema: z.ZodObject<{ + dimensions: z.ZodOptional; + encodingFormat: z.ZodOptional, z.ZodLiteral<"base64">]>>; +}, "strip", z.ZodTypeAny, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}>; +type TextEmbeddingGeckoConfig = z.infer; +declare const TextEmbeddingInputSchema: z.ZodString; +declare const textEmbedding3Small: _genkit_ai_ai_embedder.EmbedderReference; + encodingFormat: z.ZodOptional, z.ZodLiteral<"base64">]>>; +}, "strip", z.ZodTypeAny, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}>>; +declare const textEmbedding3Large: _genkit_ai_ai_embedder.EmbedderReference; + encodingFormat: z.ZodOptional, z.ZodLiteral<"base64">]>>; +}, "strip", z.ZodTypeAny, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}>>; +declare const textEmbeddingAda002: _genkit_ai_ai_embedder.EmbedderReference; + encodingFormat: z.ZodOptional, z.ZodLiteral<"base64">]>>; +}, "strip", z.ZodTypeAny, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}>>; +declare const SUPPORTED_EMBEDDING_MODELS: { + 'text-embedding-3-small': _genkit_ai_ai_embedder.EmbedderReference; + encodingFormat: z.ZodOptional, z.ZodLiteral<"base64">]>>; + }, "strip", z.ZodTypeAny, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; + }, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; + }>>; + 'text-embedding-3-large': _genkit_ai_ai_embedder.EmbedderReference; + encodingFormat: z.ZodOptional, z.ZodLiteral<"base64">]>>; + }, "strip", z.ZodTypeAny, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; + }, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; + }>>; + 'text-embedding-ada-002': _genkit_ai_ai_embedder.EmbedderReference; + encodingFormat: z.ZodOptional, z.ZodLiteral<"base64">]>>; + }, "strip", z.ZodTypeAny, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; + }, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; + }>>; +}; +declare function openaiEmbedder(name: string, options?: PluginOptions): _genkit_ai_ai_embedder.EmbedderAction; + encodingFormat: z.ZodOptional, z.ZodLiteral<"base64">]>>; +}, "strip", z.ZodTypeAny, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}>>; + +export { type PluginOptions as P, SUPPORTED_EMBEDDING_MODELS as S, TextEmbeddingConfigSchema as T, textEmbedding3Small as a, textEmbeddingAda002 as b, type TextEmbeddingGeckoConfig as c, TextEmbeddingInputSchema as d, openaiEmbedder as e, openAI as o, textEmbedding3Large as t }; diff --git a/plugins/openai/lib/embedder-DZYwphxr.d.mts b/plugins/openai/lib/embedder-DZYwphxr.d.mts new file mode 100644 index 00000000..a1f2859f --- /dev/null +++ 
b/plugins/openai/lib/embedder-DZYwphxr.d.mts @@ -0,0 +1,157 @@ +import * as _genkit_ai_ai_embedder from '@genkit-ai/ai/embedder'; +import { z } from 'zod'; +import { Plugin } from '@genkit-ai/core'; +import './dalle.mjs'; +import './gpt.mjs'; +import './tts.mjs'; +import './whisper.mjs'; + +/** + * Copyright 2024 The Fire Company + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +interface PluginOptions { + apiKey?: string; +} +/** + * This module provides an interface to the OpenAI models through the Genkit + * plugin system. It allows users to interact with various models by providing + * an API key and optional configuration. + * + * The main export is the `openai` plugin, which can be configured with an API + * key either directly or through environment variables. It initializes the + * OpenAI client and makes available the models for use. + * + * Exports: + * - gpt4o: Reference to the GPT-4o model. + * - gpt4oMini: Reference to the GPT-4o-mini model. + * - gpt4Turbo: Reference to the GPT-4 Turbo model. + * - gpt4Vision: Reference to the GPT-4 Vision model. + * - gpt4: Reference to the GPT-4 model. + * - gpt35Turbo: Reference to the GPT-3.5 Turbo model. + * - dallE3: Reference to the DALL-E 3 model. + * - tts1: Reference to the Text-to-speech 1 model. + * - tts1Hd: Reference to the Text-to-speech 1 HD model. + * - whisper: Reference to the Whisper model. + * - textEmbedding3Large: Reference to the Text Embedding Large model. + * - textEmbedding3Small: Reference to the Text Embedding Small model. + * - textEmbeddingAda002: Reference to the Ada model. + * - openai: The main plugin function to interact with OpenAI. + * + * Usage: + * To use the models, initialize the openai plugin inside `configureGenkit` and + * pass the configuration options. If no API key is provided in the options, the + * environment variable `OPENAI_API_KEY` must be set. + * + * Example: + * ``` + * import openai from 'genkitx-openai'; + * + * export default configureGenkit({ + * plugins: [ + * openai({ apiKey: 'your-api-key' }) + * ... 
// other plugins + * ] + * }); + * ``` + */ +declare const openAI: Plugin<[PluginOptions] | []>; + +declare const TextEmbeddingConfigSchema: z.ZodObject<{ + dimensions: z.ZodOptional; + encodingFormat: z.ZodOptional, z.ZodLiteral<"base64">]>>; +}, "strip", z.ZodTypeAny, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}>; +type TextEmbeddingGeckoConfig = z.infer; +declare const TextEmbeddingInputSchema: z.ZodString; +declare const textEmbedding3Small: _genkit_ai_ai_embedder.EmbedderReference; + encodingFormat: z.ZodOptional, z.ZodLiteral<"base64">]>>; +}, "strip", z.ZodTypeAny, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}>>; +declare const textEmbedding3Large: _genkit_ai_ai_embedder.EmbedderReference; + encodingFormat: z.ZodOptional, z.ZodLiteral<"base64">]>>; +}, "strip", z.ZodTypeAny, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}>>; +declare const textEmbeddingAda002: _genkit_ai_ai_embedder.EmbedderReference; + encodingFormat: z.ZodOptional, z.ZodLiteral<"base64">]>>; +}, "strip", z.ZodTypeAny, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}>>; +declare const SUPPORTED_EMBEDDING_MODELS: { + 'text-embedding-3-small': _genkit_ai_ai_embedder.EmbedderReference; + encodingFormat: z.ZodOptional, z.ZodLiteral<"base64">]>>; + }, "strip", z.ZodTypeAny, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; + }, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; + }>>; + 'text-embedding-3-large': _genkit_ai_ai_embedder.EmbedderReference; + encodingFormat: z.ZodOptional, z.ZodLiteral<"base64">]>>; + }, "strip", z.ZodTypeAny, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; + }, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; + }>>; + 'text-embedding-ada-002': _genkit_ai_ai_embedder.EmbedderReference; + encodingFormat: z.ZodOptional, z.ZodLiteral<"base64">]>>; + }, "strip", z.ZodTypeAny, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; + }, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; + }>>; +}; +declare function openaiEmbedder(name: string, options?: PluginOptions): _genkit_ai_ai_embedder.EmbedderAction; + encodingFormat: z.ZodOptional, z.ZodLiteral<"base64">]>>; +}, "strip", z.ZodTypeAny, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}, { + dimensions?: number | undefined; + encodingFormat?: "float" | "base64" | undefined; +}>>; + +export { type PluginOptions as P, SUPPORTED_EMBEDDING_MODELS as S, TextEmbeddingConfigSchema as T, textEmbedding3Small as a, textEmbeddingAda002 as b, type TextEmbeddingGeckoConfig as c, TextEmbeddingInputSchema as d, openaiEmbedder as e, openAI as o, textEmbedding3Large as t }; diff --git a/plugins/openai/lib/embedder.d.mts b/plugins/openai/lib/embedder.d.mts new file mode 100644 index 00000000..631b740d --- /dev/null +++ b/plugins/openai/lib/embedder.d.mts @@ -0,0 
+1,11 @@ +import '@genkit-ai/ai/embedder'; +import 'zod'; +export { S as SUPPORTED_EMBEDDING_MODELS, T as TextEmbeddingConfigSchema, c as TextEmbeddingGeckoConfig, d as TextEmbeddingInputSchema, e as openaiEmbedder, t as textEmbedding3Large, a as textEmbedding3Small, b as textEmbeddingAda002 } from './embedder-DZYwphxr.mjs'; +import '@genkit-ai/core'; +import './dalle.mjs'; +import '@genkit-ai/ai/model'; +import 'openai'; +import './gpt.mjs'; +import 'openai/resources/index.mjs'; +import './tts.mjs'; +import './whisper.mjs'; diff --git a/plugins/openai/lib/embedder.d.ts b/plugins/openai/lib/embedder.d.ts new file mode 100644 index 00000000..eae164f7 --- /dev/null +++ b/plugins/openai/lib/embedder.d.ts @@ -0,0 +1,11 @@ +import '@genkit-ai/ai/embedder'; +import 'zod'; +export { S as SUPPORTED_EMBEDDING_MODELS, T as TextEmbeddingConfigSchema, c as TextEmbeddingGeckoConfig, d as TextEmbeddingInputSchema, e as openaiEmbedder, t as textEmbedding3Large, a as textEmbedding3Small, b as textEmbeddingAda002 } from './embedder-DTnK2FJN.js'; +import '@genkit-ai/core'; +import './dalle.js'; +import '@genkit-ai/ai/model'; +import 'openai'; +import './gpt.js'; +import 'openai/resources/index.mjs'; +import './tts.js'; +import './whisper.js'; diff --git a/plugins/openai/lib/embedder.js b/plugins/openai/lib/embedder.js new file mode 100644 index 00000000..c5715a6d --- /dev/null +++ b/plugins/openai/lib/embedder.js @@ -0,0 +1,145 @@ +"use strict"; +var __create = Object.create; +var __defProp = Object.defineProperty; +var __getOwnPropDesc = Object.getOwnPropertyDescriptor; +var __getOwnPropNames = Object.getOwnPropertyNames; +var __getProtoOf = Object.getPrototypeOf; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); +}; +var __copyProps = (to, from, except, desc) => { + if (from && typeof from === "object" || typeof from === "function") { + for (let key of __getOwnPropNames(from)) + if (!__hasOwnProp.call(to, key) && key !== except) + __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); + } + return to; +}; +var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps( + // If the importer is in node compatibility mode or this is not an ESM + // file that has been converted to a CommonJS file using a Babel- + // compatible transform (i.e. "__esModule" has not been set), then set + // "default" to the CommonJS "module.exports" for node compatibility. + isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target, + mod +)); +var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); +var __async = (__this, __arguments, generator) => { + return new Promise((resolve, reject) => { + var fulfilled = (value) => { + try { + step(generator.next(value)); + } catch (e) { + reject(e); + } + }; + var rejected = (value) => { + try { + step(generator.throw(value)); + } catch (e) { + reject(e); + } + }; + var step = (x) => x.done ? 
resolve(x.value) : Promise.resolve(x.value).then(fulfilled, rejected); + step((generator = generator.apply(__this, __arguments)).next()); + }); +}; +var embedder_exports = {}; +__export(embedder_exports, { + SUPPORTED_EMBEDDING_MODELS: () => SUPPORTED_EMBEDDING_MODELS, + TextEmbeddingConfigSchema: () => TextEmbeddingConfigSchema, + TextEmbeddingInputSchema: () => TextEmbeddingInputSchema, + openaiEmbedder: () => openaiEmbedder, + textEmbedding3Large: () => textEmbedding3Large, + textEmbedding3Small: () => textEmbedding3Small, + textEmbeddingAda002: () => textEmbeddingAda002 +}); +module.exports = __toCommonJS(embedder_exports); +var import_embedder = require("@genkit-ai/ai/embedder"); +var import_openai = __toESM(require("openai")); +var import_zod = require("zod"); +const TextEmbeddingConfigSchema = import_zod.z.object({ + dimensions: import_zod.z.number().optional(), + encodingFormat: import_zod.z.union([import_zod.z.literal("float"), import_zod.z.literal("base64")]).optional() +}); +const TextEmbeddingInputSchema = import_zod.z.string(); +const textEmbedding3Small = (0, import_embedder.embedderRef)({ + name: "openai/text-embedding-3-small", + configSchema: TextEmbeddingConfigSchema, + info: { + dimensions: 1536, + label: "Open AI - Text Embedding 3 Small", + supports: { + input: ["text"] + } + } +}); +const textEmbedding3Large = (0, import_embedder.embedderRef)({ + name: "openai/text-embedding-3-large", + configSchema: TextEmbeddingConfigSchema, + info: { + dimensions: 3072, + label: "Open AI - Text Embedding 3 Large", + supports: { + input: ["text"] + } + } +}); +const textEmbeddingAda002 = (0, import_embedder.embedderRef)({ + name: "openai/text-embedding-ada-002", + configSchema: TextEmbeddingConfigSchema, + info: { + dimensions: 1536, + label: "Open AI - Text Embedding ADA 002", + supports: { + input: ["text"] + } + } +}); +const SUPPORTED_EMBEDDING_MODELS = { + "text-embedding-3-small": textEmbedding3Small, + "text-embedding-3-large": textEmbedding3Large, + "text-embedding-ada-002": textEmbeddingAda002 +}; +function openaiEmbedder(name, options) { + let apiKey = (options == null ? void 0 : options.apiKey) || process.env.OPENAI_API_KEY; + if (!apiKey) + throw new Error( + "please pass in the API key or set the OPENAI_API_KEY environment variable" + ); + const model = SUPPORTED_EMBEDDING_MODELS[name]; + if (!model) + throw new Error(`Unsupported model: ${name}`); + const client = new import_openai.default({ apiKey }); + return (0, import_embedder.defineEmbedder)( + { + info: model.info, + configSchema: TextEmbeddingConfigSchema, + name: model.name + }, + (input, options2) => __async(this, null, function* () { + const embeddings = yield client.embeddings.create({ + model: name, + input: input.map((d) => d.text()), + dimensions: options2 == null ? void 0 : options2.dimensions, + encoding_format: options2 == null ? 
void 0 : options2.encodingFormat
+      });
+      return {
+        embeddings: embeddings.data.map((d) => ({ embedding: d.embedding }))
+      };
+    })
+  );
+}
+// Annotate the CommonJS export names for ESM import in node:
+0 && (module.exports = {
+  SUPPORTED_EMBEDDING_MODELS,
+  TextEmbeddingConfigSchema,
+  TextEmbeddingInputSchema,
+  openaiEmbedder,
+  textEmbedding3Large,
+  textEmbedding3Small,
+  textEmbeddingAda002
+});
+//# sourceMappingURL=embedder.js.map
\ No newline at end of file
diff --git a/plugins/openai/lib/embedder.js.map b/plugins/openai/lib/embedder.js.map
new file mode 100644
index 00000000..fd053bab
--- /dev/null
+++ b/plugins/openai/lib/embedder.js.map
@@ -0,0 +1 @@
+{"version":3,"sources":["../src/embedder.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { defineEmbedder, embedderRef } from '@genkit-ai/ai/embedder';\nimport OpenAI from 'openai';\nimport { z } from 'zod';\nimport { type PluginOptions } from './index.js';\n\nexport const TextEmbeddingConfigSchema = z.object({\n  dimensions: z.number().optional(),\n  encodingFormat: z.union([z.literal('float'), z.literal('base64')]).optional(),\n});\n\nexport type TextEmbeddingGeckoConfig = z.infer<\n  typeof TextEmbeddingConfigSchema\n>;\n\nexport const TextEmbeddingInputSchema = z.string();\n\nexport const textEmbedding3Small = embedderRef({\n  name: 'openai/text-embedding-3-small',\n  configSchema: TextEmbeddingConfigSchema,\n  info: {\n    dimensions: 1536,\n    label: 'Open AI - Text Embedding 3 Small',\n    supports: {\n      input: ['text'],\n    },\n  },\n});\n\nexport const textEmbedding3Large = embedderRef({\n  name: 'openai/text-embedding-3-large',\n  configSchema: TextEmbeddingConfigSchema,\n  info: {\n    dimensions: 3072,\n    label: 'Open AI - Text Embedding 3 Large',\n    supports: {\n      input: ['text'],\n    },\n  },\n});\n\nexport const textEmbeddingAda002 = embedderRef({\n  name: 'openai/text-embedding-ada-002',\n  configSchema: TextEmbeddingConfigSchema,\n  info: {\n    dimensions: 1536,\n    label: 'Open AI - Text Embedding ADA 002',\n    supports: {\n      input: ['text'],\n    },\n  },\n});\n\nexport const SUPPORTED_EMBEDDING_MODELS = {\n  'text-embedding-3-small': textEmbedding3Small,\n  'text-embedding-3-large': textEmbedding3Large,\n  'text-embedding-ada-002': textEmbeddingAda002,\n};\n\nexport function openaiEmbedder(name: string, options?: PluginOptions) {\n  let apiKey = options?.apiKey || process.env.OPENAI_API_KEY;\n  if (!apiKey)\n    throw new Error(\n      'please pass in the API key or set the OPENAI_API_KEY environment variable'\n    );\n  const model = SUPPORTED_EMBEDDING_MODELS[name];\n  if (!model) throw new Error(`Unsupported model: ${name}`);\n\n  const client = new OpenAI({ apiKey });\n  return defineEmbedder(\n    {\n      info: model.info!,\n      configSchema: TextEmbeddingConfigSchema,\n      name: model.name,\n    },\n    async (input, options) => {\n      const embeddings = await client.embeddings.create({\n        model: name,\n        input: input.map((d) => d.text()),\n        dimensions: options?.dimensions,\n        encoding_format: options?.encodingFormat,\n      });\n      return {\n        embeddings: embeddings.data.map((d) => ({ embedding: d.embedding })),\n      };\n    }\n  );\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAgBA,sBAA4C;AAC5C,oBAAmB;AACnB,iBAAkB;AAGX,MAAM,4BAA4B,aAAE,OAAO;AAAA,EAChD,YAAY,aAAE,OAAO,EAAE,SAAS;AAAA,EAChC,gBAAgB,aAAE,MAAM,CAAC,aAAE,QAAQ,OAAO,GAAG,aAAE,QAAQ,QAAQ,CAAC,CAAC,EAAE,SAAS;AAC9E,CAAC;AAMM,MAAM,2BAA2B,aAAE,OAAO;AAE1C,MAAM,0BAAsB,6BAAY;AAAA,EAC7C,MAAM;AAAA,EACN,cAAc;AAAA,EACd,MAAM;AAAA,IACJ,YAAY;AAAA,IACZ,OAAO;AAAA,IACP,UAAU;AAAA,MACR,OAAO,CAAC,MAAM;AAAA,IAChB;AAAA,EACF;AACF,CAAC;AAEM,MAAM,0BAAsB,6BAAY;AAAA,EAC7C,MAAM;AAAA,EACN,cAAc;AAAA,EACd,MAAM;AAAA,IACJ,YAAY;AAAA,IACZ,OAAO;AAAA,IACP,UAAU;AAAA,MACR,OAAO,CAAC,MAAM;AAAA,IAChB;AAAA,EACF;AACF,CAAC;AAEM,MAAM,0BAAsB,6BAAY;AAAA,EAC7C,MAAM;AAAA,EACN,cAAc;AAAA,EACd,MAAM;AAAA,IACJ,YAAY;AAAA,IACZ,OAAO;AAAA,IACP,UAAU;AAAA,MACR,OAAO,CAAC,MAAM;AAAA,IAChB;AAAA,EACF;AACF,CAAC;AAEM,MAAM,6BAA6B;AAAA,EACxC,0BAA0B;AAAA,EAC1B,0BAA0B;AAAA,EAC1B,0BAA0B;AAC5B;AAEO,SAAS,eAAe,MAAc,SAAyB;AACpE,MAAI,UAAS,mCAAS,WAAU,QAAQ,IAAI;AAC5C,MAAI,CAAC;AACH,UAAM,IAAI;AAAA,MACR;AAAA,IACF;AACF,QAAM,QAAQ,2BAA2B,IAAI;AAC7C,MAAI,CAAC;AAAO,UAAM,IAAI,MAAM,sBAAsB,IAAI,EAAE;AAExD,QAAM,SAAS,IAAI,cAAAA,QAAO,EAAE,OAAO,CAAC;AACpC,aAAO;AAAA,IACL;AAAA,MACE,MAAM,MAAM;AAAA,MACZ,cAAc;AAAA,MACd,MAAM,MAAM;AAAA,IACd;AAAA,IACA,CAAO,OAAOC,aAAY;AACxB,YAAM,aAAa,MAAM,OAAO,WAAW,OAAO;AAAA,QAChD,OAAO;AAAA,QACP,OAAO,MAAM,IAAI,CAAC,MAAM,EAAE,KAAK,CAAC;AAAA,QAChC,YAAYA,YAAA,gBAAAA,SAAS;AAAA,QACrB,iBAAiBA,YAAA,gBAAAA,SAAS;AAAA,MAC5B,CAAC;AACD,aAAO;AAAA,QACL,YAAY,WAAW,KAAK,IAAI,CAAC,OAAO,EAAE,WAAW,EAAE,UAAU,EAAE;AAAA,MACrE;AAAA,IACF;AAAA,EACF;AACF;","names":["OpenAI","options"]}
\ No newline at end of file
diff --git a/plugins/openai/lib/embedder.mjs b/plugins/openai/lib/embedder.mjs
new file mode 100644
index 00000000..3d096d69
--- /dev/null
+++ b/plugins/openai/lib/embedder.mjs
@@ -0,0 +1,88 @@
+import {
+  __async
+} from "./chunk-WFI2LP4G.mjs";
+import { defineEmbedder, embedderRef } from "@genkit-ai/ai/embedder";
+import OpenAI from "openai";
+import { z } from "zod";
+const TextEmbeddingConfigSchema = z.object({
+  dimensions: z.number().optional(),
+  encodingFormat: z.union([z.literal("float"), z.literal("base64")]).optional()
+});
+const TextEmbeddingInputSchema = z.string();
+const textEmbedding3Small = embedderRef({
+  name: "openai/text-embedding-3-small",
+  configSchema: TextEmbeddingConfigSchema,
+  info: {
+    dimensions: 1536,
+    label: "Open AI - Text Embedding 3 Small",
+    supports: {
+      input: ["text"]
+    }
+  }
+});
+const textEmbedding3Large = embedderRef({
+  name: "openai/text-embedding-3-large",
+  configSchema: TextEmbeddingConfigSchema,
+  info: {
+    dimensions: 3072,
+    label: "Open AI - Text Embedding 3 Large",
+    supports: {
+      input: ["text"]
+    }
+  }
+});
+const textEmbeddingAda002 = embedderRef({
+  name: "openai/text-embedding-ada-002",
+  configSchema: TextEmbeddingConfigSchema,
+  info: {
+    dimensions: 1536,
+    label: "Open AI - Text Embedding ADA 002",
+    supports: {
+      input: ["text"]
+    }
+  }
+});
+const SUPPORTED_EMBEDDING_MODELS = {
+  "text-embedding-3-small": textEmbedding3Small,
+  "text-embedding-3-large": textEmbedding3Large,
+  "text-embedding-ada-002": textEmbeddingAda002
+};
+function openaiEmbedder(name, options) {
+  let apiKey = (options == null ? void 0 : options.apiKey) || process.env.OPENAI_API_KEY;
+  if (!apiKey)
+    throw new Error(
+      "please pass in the API key or set the OPENAI_API_KEY environment variable"
+    );
+  const model = SUPPORTED_EMBEDDING_MODELS[name];
+  if (!model)
+    throw new Error(`Unsupported model: ${name}`);
+  const client = new OpenAI({ apiKey });
+  return defineEmbedder(
+    {
+      info: model.info,
+      configSchema: TextEmbeddingConfigSchema,
+      name: model.name
+    },
+    (input, options2) => __async(this, null, function* () {
+      const embeddings = yield client.embeddings.create({
+        model: name,
+        input: input.map((d) => d.text()),
+        dimensions: options2 == null ? void 0 : options2.dimensions,
+        encoding_format: options2 == null ? void 0 : options2.encodingFormat
+      });
+      return {
+        embeddings: embeddings.data.map((d) => ({ embedding: d.embedding }))
+      };
+    })
+  );
+}
+export {
+  SUPPORTED_EMBEDDING_MODELS,
+  TextEmbeddingConfigSchema,
+  TextEmbeddingInputSchema,
+  openaiEmbedder,
+  textEmbedding3Large,
+  textEmbedding3Small,
+  textEmbeddingAda002
+};
+//# sourceMappingURL=embedder.mjs.map
\ No newline at end of file
diff --git a/plugins/openai/lib/embedder.mjs.map b/plugins/openai/lib/embedder.mjs.map
new file mode 100644
index 00000000..a41af74f
--- /dev/null
+++ b/plugins/openai/lib/embedder.mjs.map
@@ -0,0 +1 @@
+{"version":3,"sources":["../src/embedder.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { defineEmbedder, embedderRef } from '@genkit-ai/ai/embedder';\nimport OpenAI from 'openai';\nimport { z } from 'zod';\nimport { type PluginOptions } from './index.js';\n\nexport const TextEmbeddingConfigSchema = z.object({\n  dimensions: z.number().optional(),\n  encodingFormat: z.union([z.literal('float'), z.literal('base64')]).optional(),\n});\n\nexport type TextEmbeddingGeckoConfig = z.infer<\n  typeof TextEmbeddingConfigSchema\n>;\n\nexport const TextEmbeddingInputSchema = z.string();\n\nexport const textEmbedding3Small = embedderRef({\n  name: 'openai/text-embedding-3-small',\n  configSchema: TextEmbeddingConfigSchema,\n  info: {\n    dimensions: 1536,\n    label: 'Open AI - Text Embedding 3 Small',\n    supports: {\n      input: ['text'],\n    },\n  },\n});\n\nexport const textEmbedding3Large = embedderRef({\n  name: 'openai/text-embedding-3-large',\n  configSchema: TextEmbeddingConfigSchema,\n  info: {\n    dimensions: 3072,\n    label: 'Open AI - Text Embedding 3 Large',\n    supports: {\n      input: ['text'],\n    },\n  },\n});\n\nexport const textEmbeddingAda002 = embedderRef({\n  name: 'openai/text-embedding-ada-002',\n  configSchema: TextEmbeddingConfigSchema,\n  info: {\n    dimensions: 1536,\n    label: 'Open AI - Text Embedding ADA 002',\n    supports: {\n      input: ['text'],\n    },\n  },\n});\n\nexport const SUPPORTED_EMBEDDING_MODELS = {\n  'text-embedding-3-small': textEmbedding3Small,\n  'text-embedding-3-large': textEmbedding3Large,\n  'text-embedding-ada-002': textEmbeddingAda002,\n};\n\nexport function openaiEmbedder(name: string, options?: PluginOptions) {\n  let apiKey = 
options?.apiKey || process.env.OPENAI_API_KEY;\n if (!apiKey)\n throw new Error(\n 'please pass in the API key or set the OPENAI_API_KEY environment variable'\n );\n const model = SUPPORTED_EMBEDDING_MODELS[name];\n if (!model) throw new Error(`Unsupported model: ${name}`);\n\n const client = new OpenAI({ apiKey });\n return defineEmbedder(\n {\n info: model.info!,\n configSchema: TextEmbeddingConfigSchema,\n name: model.name,\n },\n async (input, options) => {\n const embeddings = await client.embeddings.create({\n model: name,\n input: input.map((d) => d.text()),\n dimensions: options?.dimensions,\n encoding_format: options?.encodingFormat,\n });\n return {\n embeddings: embeddings.data.map((d) => ({ embedding: d.embedding })),\n };\n }\n );\n}\n"],"mappings":";;;AAgBA,SAAS,gBAAgB,mBAAmB;AAC5C,OAAO,YAAY;AACnB,SAAS,SAAS;AAGX,MAAM,4BAA4B,EAAE,OAAO;AAAA,EAChD,YAAY,EAAE,OAAO,EAAE,SAAS;AAAA,EAChC,gBAAgB,EAAE,MAAM,CAAC,EAAE,QAAQ,OAAO,GAAG,EAAE,QAAQ,QAAQ,CAAC,CAAC,EAAE,SAAS;AAC9E,CAAC;AAMM,MAAM,2BAA2B,EAAE,OAAO;AAE1C,MAAM,sBAAsB,YAAY;AAAA,EAC7C,MAAM;AAAA,EACN,cAAc;AAAA,EACd,MAAM;AAAA,IACJ,YAAY;AAAA,IACZ,OAAO;AAAA,IACP,UAAU;AAAA,MACR,OAAO,CAAC,MAAM;AAAA,IAChB;AAAA,EACF;AACF,CAAC;AAEM,MAAM,sBAAsB,YAAY;AAAA,EAC7C,MAAM;AAAA,EACN,cAAc;AAAA,EACd,MAAM;AAAA,IACJ,YAAY;AAAA,IACZ,OAAO;AAAA,IACP,UAAU;AAAA,MACR,OAAO,CAAC,MAAM;AAAA,IAChB;AAAA,EACF;AACF,CAAC;AAEM,MAAM,sBAAsB,YAAY;AAAA,EAC7C,MAAM;AAAA,EACN,cAAc;AAAA,EACd,MAAM;AAAA,IACJ,YAAY;AAAA,IACZ,OAAO;AAAA,IACP,UAAU;AAAA,MACR,OAAO,CAAC,MAAM;AAAA,IAChB;AAAA,EACF;AACF,CAAC;AAEM,MAAM,6BAA6B;AAAA,EACxC,0BAA0B;AAAA,EAC1B,0BAA0B;AAAA,EAC1B,0BAA0B;AAC5B;AAEO,SAAS,eAAe,MAAc,SAAyB;AACpE,MAAI,UAAS,mCAAS,WAAU,QAAQ,IAAI;AAC5C,MAAI,CAAC;AACH,UAAM,IAAI;AAAA,MACR;AAAA,IACF;AACF,QAAM,QAAQ,2BAA2B,IAAI;AAC7C,MAAI,CAAC;AAAO,UAAM,IAAI,MAAM,sBAAsB,IAAI,EAAE;AAExD,QAAM,SAAS,IAAI,OAAO,EAAE,OAAO,CAAC;AACpC,SAAO;AAAA,IACL;AAAA,MACE,MAAM,MAAM;AAAA,MACZ,cAAc;AAAA,MACd,MAAM,MAAM;AAAA,IACd;AAAA,IACA,CAAO,OAAOA,aAAY;AACxB,YAAM,aAAa,MAAM,OAAO,WAAW,OAAO;AAAA,QAChD,OAAO;AAAA,QACP,OAAO,MAAM,IAAI,CAAC,MAAM,EAAE,KAAK,CAAC;AAAA,QAChC,YAAYA,YAAA,gBAAAA,SAAS;AAAA,QACrB,iBAAiBA,YAAA,gBAAAA,SAAS;AAAA,MAC5B,CAAC;AACD,aAAO;AAAA,QACL,YAAY,WAAW,KAAK,IAAI,CAAC,OAAO,EAAE,WAAW,EAAE,UAAU,EAAE;AAAA,MACrE;AAAA,IACF;AAAA,EACF;AACF;","names":["options"]} \ No newline at end of file diff --git a/plugins/openai/lib/gpt.d.mts b/plugins/openai/lib/gpt.d.mts new file mode 100644 index 00000000..09239bfa --- /dev/null +++ b/plugins/openai/lib/gpt.d.mts @@ -0,0 +1,682 @@ +import * as _genkit_ai_ai_model from '@genkit-ai/ai/model'; +import { Role, Part, MessageData, ToolRequestPart, CandidateData, GenerateRequest, GenerateResponseChunkData, GenerateResponseData, ModelAction } from '@genkit-ai/ai/model'; +import { StreamingCallback } from '@genkit-ai/core'; +import OpenAI from 'openai'; +import { ChatCompletionRole, ChatCompletionContentPart, ChatCompletionMessageParam, ChatCompletionMessageToolCall, ChatCompletionChunk, ChatCompletion } from 'openai/resources/index.mjs'; +import z from 'zod'; + +declare const OpenAiConfigSchema: z.ZodObject; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + 
temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}>; +type VisualDetailLevel = z.infer['visualDetailLevel']; +declare const gpt4o: _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}>>; +declare const gpt4oMini: _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}, { + user?: string | undefined; + 
version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}>>; +declare const gpt4Turbo: _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}>>; +declare const gpt4Vision: _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}>>; +declare const gpt4: _genkit_ai_ai_model.ModelReference; 
+ temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}>>; +declare const gpt35Turbo: _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}>>; +declare const SUPPORTED_GPT_MODELS: { + 'gpt-4o': _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; + }, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; + }>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | 
undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }>>; + 'gpt-4o-mini': _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; + }, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; + }>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }>>; + 'gpt-4-turbo': _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; + }, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; + }>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: 
number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }>>; + 'gpt-4-vision': _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; + }, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; + }>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }>>; + 'gpt-4': _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; + }, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; + }>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }>>; + 'gpt-3.5-turbo': _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: 
z.ZodOptional; + stopSequences: z.ZodOptional>; + }, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; + }>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }>>; +}; +declare function toOpenAIRole(role: Role): ChatCompletionRole; +/** + * Converts a Genkit Part to the corresponding OpenAI ChatCompletionContentPart. + * @param part The Genkit Part to convert. + * @param visualDetailLevel The visual detail level to use for media parts. + * @returns The corresponding OpenAI ChatCompletionContentPart. + * @throws Error if the part contains unsupported fields for the current message role. + */ +declare function toOpenAiTextAndMedia(part: Part, visualDetailLevel: VisualDetailLevel): ChatCompletionContentPart; +/** + * Converts a Genkit MessageData array to an OpenAI ChatCompletionMessageParam array. + * @param messages The Genkit MessageData array to convert. + * @param visualDetailLevel The visual detail level to use for media parts. + * @returns The converted OpenAI ChatCompletionMessageParam array. + */ +declare function toOpenAiMessages(messages: MessageData[], visualDetailLevel?: VisualDetailLevel): ChatCompletionMessageParam[]; +/** + * Converts an OpenAI tool call to a Genkit ToolRequestPart. + * @param toolCall The OpenAI tool call to convert. + * @returns The converted Genkit ToolRequestPart. + */ +declare function fromOpenAiToolCall(toolCall: ChatCompletionMessageToolCall | ChatCompletionChunk.Choice.Delta.ToolCall): ToolRequestPart; +/** + * Converts an OpenAI message event to a Genkit CandidateData object. + * @param choice The OpenAI message event to convert. + * @param jsonMode Whether the event is a JSON response. + * @returns The converted Genkit CandidateData object. + */ +declare function fromOpenAiChoice(choice: ChatCompletion.Choice, jsonMode?: boolean): CandidateData; +/** + * Converts an OpenAI message stream event to a Genkit CandidateData object. + * @param choice The OpenAI message stream event to convert. + * @param jsonMode Whether the event is a JSON response. + * @returns The converted Genkit CandidateData object. + */ +declare function fromOpenAiChunkChoice(choice: ChatCompletionChunk.Choice, jsonMode?: boolean): CandidateData; +/** + * Converts an OpenAI request to an OpenAI API request body. + * @param modelName The name of the OpenAI model to use. 
+ * @param request The Genkit GenerateRequest to convert. + * @returns The converted OpenAI API request body. + * @throws An error if the specified model is not supported or if an unsupported output format is requested. + */ +declare function toOpenAiRequestBody(modelName: string, request: GenerateRequest): OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming; +/** + * Creates the runner used by Genkit to interact with the GPT model. + * @param name The name of the GPT model. + * @param client The OpenAI client instance. + * @returns The runner that Genkit will call when the model is invoked. + */ +declare function gptRunner(name: string, client: OpenAI): (request: GenerateRequest, streamingCallback?: StreamingCallback) => Promise; +/** + * Defines a GPT model with the given name and OpenAI client. + * @param name The name of the GPT model. + * @param client The OpenAI client instance. + * @returns The defined GPT model. + * @throws An error if the specified model is not supported. + */ +declare function gptModel(name: string, client: OpenAI): ModelAction; + +export { OpenAiConfigSchema, SUPPORTED_GPT_MODELS, fromOpenAiChoice, fromOpenAiChunkChoice, fromOpenAiToolCall, gpt35Turbo, gpt4, gpt4Turbo, gpt4Vision, gpt4o, gpt4oMini, gptModel, gptRunner, toOpenAIRole, toOpenAiMessages, toOpenAiRequestBody, toOpenAiTextAndMedia }; diff --git a/plugins/openai/lib/gpt.d.ts b/plugins/openai/lib/gpt.d.ts new file mode 100644 index 00000000..09239bfa --- /dev/null +++ b/plugins/openai/lib/gpt.d.ts @@ -0,0 +1,682 @@ +import * as _genkit_ai_ai_model from '@genkit-ai/ai/model'; +import { Role, Part, MessageData, ToolRequestPart, CandidateData, GenerateRequest, GenerateResponseChunkData, GenerateResponseData, ModelAction } from '@genkit-ai/ai/model'; +import { StreamingCallback } from '@genkit-ai/core'; +import OpenAI from 'openai'; +import { ChatCompletionRole, ChatCompletionContentPart, ChatCompletionMessageParam, ChatCompletionMessageToolCall, ChatCompletionChunk, ChatCompletion } from 'openai/resources/index.mjs'; +import z from 'zod'; + +declare const OpenAiConfigSchema: z.ZodObject; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | 
undefined; +}>; +type VisualDetailLevel = z.infer['visualDetailLevel']; +declare const gpt4o: _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}>>; +declare const gpt4oMini: _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}>>; +declare const gpt4Turbo: _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; +}>, "strip", 
z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}>>; +declare const gpt4Vision: _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}>>; +declare const gpt4: _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}, { + user?: 
string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}>>; +declare const gpt35Turbo: _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; +}>>; +declare const SUPPORTED_GPT_MODELS: { + 'gpt-4o': _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; + }, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; + }>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }>>; + 
'gpt-4o-mini': _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; + }, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; + }>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }>>; + 'gpt-4-turbo': _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; + }, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; + }>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }>>; + 'gpt-4-vision': _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; + }, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; + }>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: 
number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }>>; + 'gpt-4': _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; + }, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; + }>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }>>; + 'gpt-3.5-turbo': _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; + }, { + frequencyPenalty: z.ZodOptional; + logitBias: z.ZodOptional>; + logProbs: z.ZodOptional; + presencePenalty: z.ZodOptional; + seed: z.ZodOptional; + topLogProbs: z.ZodOptional; + user: z.ZodOptional; + visualDetailLevel: z.ZodOptional>; + }>, "strip", z.ZodTypeAny, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }, { + user?: string | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: 
number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + frequencyPenalty?: number | undefined; + logitBias?: Record | undefined; + logProbs?: boolean | undefined; + presencePenalty?: number | undefined; + seed?: number | undefined; + topLogProbs?: number | undefined; + visualDetailLevel?: "auto" | "low" | "high" | undefined; + }>>; +}; +declare function toOpenAIRole(role: Role): ChatCompletionRole; +/** + * Converts a Genkit Part to the corresponding OpenAI ChatCompletionContentPart. + * @param part The Genkit Part to convert. + * @param visualDetailLevel The visual detail level to use for media parts. + * @returns The corresponding OpenAI ChatCompletionContentPart. + * @throws Error if the part contains unsupported fields for the current message role. + */ +declare function toOpenAiTextAndMedia(part: Part, visualDetailLevel: VisualDetailLevel): ChatCompletionContentPart; +/** + * Converts a Genkit MessageData array to an OpenAI ChatCompletionMessageParam array. + * @param messages The Genkit MessageData array to convert. + * @param visualDetailLevel The visual detail level to use for media parts. + * @returns The converted OpenAI ChatCompletionMessageParam array. + */ +declare function toOpenAiMessages(messages: MessageData[], visualDetailLevel?: VisualDetailLevel): ChatCompletionMessageParam[]; +/** + * Converts an OpenAI tool call to a Genkit ToolRequestPart. + * @param toolCall The OpenAI tool call to convert. + * @returns The converted Genkit ToolRequestPart. + */ +declare function fromOpenAiToolCall(toolCall: ChatCompletionMessageToolCall | ChatCompletionChunk.Choice.Delta.ToolCall): ToolRequestPart; +/** + * Converts an OpenAI message event to a Genkit CandidateData object. + * @param choice The OpenAI message event to convert. + * @param jsonMode Whether the event is a JSON response. + * @returns The converted Genkit CandidateData object. + */ +declare function fromOpenAiChoice(choice: ChatCompletion.Choice, jsonMode?: boolean): CandidateData; +/** + * Converts an OpenAI message stream event to a Genkit CandidateData object. + * @param choice The OpenAI message stream event to convert. + * @param jsonMode Whether the event is a JSON response. + * @returns The converted Genkit CandidateData object. + */ +declare function fromOpenAiChunkChoice(choice: ChatCompletionChunk.Choice, jsonMode?: boolean): CandidateData; +/** + * Converts an OpenAI request to an OpenAI API request body. + * @param modelName The name of the OpenAI model to use. + * @param request The Genkit GenerateRequest to convert. + * @returns The converted OpenAI API request body. + * @throws An error if the specified model is not supported or if an unsupported output format is requested. + */ +declare function toOpenAiRequestBody(modelName: string, request: GenerateRequest): OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming; +/** + * Creates the runner used by Genkit to interact with the GPT model. + * @param name The name of the GPT model. + * @param client The OpenAI client instance. + * @returns The runner that Genkit will call when the model is invoked. + */ +declare function gptRunner(name: string, client: OpenAI): (request: GenerateRequest, streamingCallback?: StreamingCallback) => Promise; +/** + * Defines a GPT model with the given name and OpenAI client. + * @param name The name of the GPT model. + * @param client The OpenAI client instance. + * @returns The defined GPT model. 
+ * @throws An error if the specified model is not supported. + */ +declare function gptModel(name: string, client: OpenAI): ModelAction; + +export { OpenAiConfigSchema, SUPPORTED_GPT_MODELS, fromOpenAiChoice, fromOpenAiChunkChoice, fromOpenAiToolCall, gpt35Turbo, gpt4, gpt4Turbo, gpt4Vision, gpt4o, gpt4oMini, gptModel, gptRunner, toOpenAIRole, toOpenAiMessages, toOpenAiRequestBody, toOpenAiTextAndMedia }; diff --git a/plugins/openai/lib/gpt.js b/plugins/openai/lib/gpt.js new file mode 100644 index 00000000..1e56c014 --- /dev/null +++ b/plugins/openai/lib/gpt.js @@ -0,0 +1,521 @@ +"use strict"; +var __create = Object.create; +var __defProp = Object.defineProperty; +var __defProps = Object.defineProperties; +var __getOwnPropDesc = Object.getOwnPropertyDescriptor; +var __getOwnPropDescs = Object.getOwnPropertyDescriptors; +var __getOwnPropNames = Object.getOwnPropertyNames; +var __getOwnPropSymbols = Object.getOwnPropertySymbols; +var __getProtoOf = Object.getPrototypeOf; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __propIsEnum = Object.prototype.propertyIsEnumerable; +var __knownSymbol = (name, symbol) => { + return (symbol = Symbol[name]) ? symbol : Symbol.for("Symbol." + name); +}; +var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value; +var __spreadValues = (a, b) => { + for (var prop in b || (b = {})) + if (__hasOwnProp.call(b, prop)) + __defNormalProp(a, prop, b[prop]); + if (__getOwnPropSymbols) + for (var prop of __getOwnPropSymbols(b)) { + if (__propIsEnum.call(b, prop)) + __defNormalProp(a, prop, b[prop]); + } + return a; +}; +var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b)); +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); +}; +var __copyProps = (to, from, except, desc) => { + if (from && typeof from === "object" || typeof from === "function") { + for (let key of __getOwnPropNames(from)) + if (!__hasOwnProp.call(to, key) && key !== except) + __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); + } + return to; +}; +var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps( + // If the importer is in node compatibility mode or this is not an ESM + // file that has been converted to a CommonJS file using a Babel- + // compatible transform (i.e. "__esModule" has not been set), then set + // "default" to the CommonJS "module.exports" for node compatibility. + isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target, + mod +)); +var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); +var __async = (__this, __arguments, generator) => { + return new Promise((resolve, reject) => { + var fulfilled = (value) => { + try { + step(generator.next(value)); + } catch (e) { + reject(e); + } + }; + var rejected = (value) => { + try { + step(generator.throw(value)); + } catch (e) { + reject(e); + } + }; + var step = (x) => x.done ? resolve(x.value) : Promise.resolve(x.value).then(fulfilled, rejected); + step((generator = generator.apply(__this, __arguments)).next()); + }); +}; +var __forAwait = (obj, it, method) => (it = obj[__knownSymbol("asyncIterator")]) ? 
diff --git a/plugins/openai/lib/gpt.js b/plugins/openai/lib/gpt.js
new file mode 100644
index 00000000..1e56c014
--- /dev/null
+++ b/plugins/openai/lib/gpt.js
@@ -0,0 +1,521 @@
+"use strict";
+var __create = Object.create;
+var __defProp = Object.defineProperty;
+var __defProps = Object.defineProperties;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropDescs = Object.getOwnPropertyDescriptors;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __getOwnPropSymbols = Object.getOwnPropertySymbols;
+var __getProtoOf = Object.getPrototypeOf;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __propIsEnum = Object.prototype.propertyIsEnumerable;
+var __knownSymbol = (name, symbol) => {
+  return (symbol = Symbol[name]) ? symbol : Symbol.for("Symbol." + name);
+};
+var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
+var __spreadValues = (a, b) => {
+  for (var prop in b || (b = {}))
+    if (__hasOwnProp.call(b, prop))
+      __defNormalProp(a, prop, b[prop]);
+  if (__getOwnPropSymbols)
+    for (var prop of __getOwnPropSymbols(b)) {
+      if (__propIsEnum.call(b, prop))
+        __defNormalProp(a, prop, b[prop]);
+    }
+  return a;
+};
+var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b));
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+  // If the importer is in node compatibility mode or this is not an ESM
+  // file that has been converted to a CommonJS file using a Babel-
+  // compatible transform (i.e. "__esModule" has not been set), then set
+  // "default" to the CommonJS "module.exports" for node compatibility.
+  isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+  mod
+));
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+var __async = (__this, __arguments, generator) => {
+  return new Promise((resolve, reject) => {
+    var fulfilled = (value) => {
+      try {
+        step(generator.next(value));
+      } catch (e) {
+        reject(e);
+      }
+    };
+    var rejected = (value) => {
+      try {
+        step(generator.throw(value));
+      } catch (e) {
+        reject(e);
+      }
+    };
+    var step = (x) => x.done ? resolve(x.value) : Promise.resolve(x.value).then(fulfilled, rejected);
+    step((generator = generator.apply(__this, __arguments)).next());
+  });
+};
+var __forAwait = (obj, it, method) => (it = obj[__knownSymbol("asyncIterator")]) ? it.call(obj) : (obj = obj[__knownSymbol("iterator")](), it = {}, method = (key, fn) => (fn = obj[key]) && (it[key] = (arg) => new Promise((yes, no, done) => (arg = fn.call(obj, arg), done = arg.done, Promise.resolve(arg.value).then((value) => yes({ value, done }), no)))), method("next"), method("return"), it);
+var gpt_exports = {};
+__export(gpt_exports, {
+  OpenAiConfigSchema: () => OpenAiConfigSchema,
+  SUPPORTED_GPT_MODELS: () => SUPPORTED_GPT_MODELS,
+  fromOpenAiChoice: () => fromOpenAiChoice,
+  fromOpenAiChunkChoice: () => fromOpenAiChunkChoice,
+  fromOpenAiToolCall: () => fromOpenAiToolCall,
+  gpt35Turbo: () => gpt35Turbo,
+  gpt4: () => gpt4,
+  gpt4Turbo: () => gpt4Turbo,
+  gpt4Vision: () => gpt4Vision,
+  gpt4o: () => gpt4o,
+  gpt4oMini: () => gpt4oMini,
+  gptModel: () => gptModel,
+  gptRunner: () => gptRunner,
+  toOpenAIRole: () => toOpenAIRole,
+  toOpenAiMessages: () => toOpenAiMessages,
+  toOpenAiRequestBody: () => toOpenAiRequestBody,
+  toOpenAiTextAndMedia: () => toOpenAiTextAndMedia
+});
+module.exports = __toCommonJS(gpt_exports);
+var import_ai = require("@genkit-ai/ai");
+var import_model = require("@genkit-ai/ai/model");
+var import_zod = __toESM(require("zod"));
+const MODELS_SUPPORTING_OPENAI_RESPONSE_FORMAT = [
+  "gpt-4o",
+  "gpt-4o-2024-05-13",
+  "gpt-4o-mini",
+  "gpt-4o-mini-2024-07-18",
+  "gpt-4-turbo",
+  "gpt-4-turbo-2024-04-09",
+  "gpt-4-turbo-preview",
+  "gpt-4-0125-preview",
+  "gpt-4-1106-preview",
+  "gpt-3.5-turbo-0125",
+  "gpt-3.5-turbo",
+  "gpt-3.5-turbo-1106"
+];
+const OpenAiConfigSchema = import_model.GenerationCommonConfigSchema.extend({
+  frequencyPenalty: import_zod.default.number().min(-2).max(2).optional(),
+  logitBias: import_zod.default.record(import_zod.default.string(), import_zod.default.number().min(-100).max(100)).optional(),
+  logProbs: import_zod.default.boolean().optional(),
+  presencePenalty: import_zod.default.number().min(-2).max(2).optional(),
+  seed: import_zod.default.number().int().optional(),
+  topLogProbs: import_zod.default.number().int().min(0).max(20).optional(),
+  user: import_zod.default.string().optional(),
+  visualDetailLevel: import_zod.default.enum(["auto", "low", "high"]).optional()
+});
+const gpt4o = (0, import_model.modelRef)({
+  name: "openai/gpt-4o",
+  info: {
+    versions: ["gpt-4o", "gpt-4o-2024-05-13"],
+    label: "OpenAI - GPT-4o",
+    supports: {
+      multiturn: true,
+      tools: true,
+      media: true,
+      systemRole: true,
+      output: ["text", "json"]
+    }
+  },
+  configSchema: OpenAiConfigSchema
+});
+const gpt4oMini = (0, import_model.modelRef)({
+  name: "openai/gpt-4o-mini",
+  info: {
+    versions: ["gpt-4o-mini", "gpt-4o-mini-2024-07-18"],
+    label: "OpenAI - GPT-4o mini",
+    supports: {
+      multiturn: true,
+      tools: true,
+      media: true,
+      systemRole: true,
+      output: ["text", "json"]
+    }
+  },
+  configSchema: OpenAiConfigSchema
+});
+const gpt4Turbo = (0, import_model.modelRef)({
+  name: "openai/gpt-4-turbo",
+  info: {
+    versions: [
+      "gpt-4-turbo",
+      "gpt-4-turbo-2024-04-09",
+      "gpt-4-turbo-preview",
+      "gpt-4-0125-preview",
+      "gpt-4-1106-preview"
+    ],
+    label: "OpenAI - GPT-4 Turbo",
+    supports: {
+      multiturn: true,
+      tools: true,
+      media: true,
+      systemRole: true,
+      output: ["text", "json"]
+    }
+  },
+  configSchema: OpenAiConfigSchema
+});
+const gpt4Vision = (0, import_model.modelRef)({
+  name: "openai/gpt-4-vision",
+  info: {
+    versions: ["gpt-4-vision-preview", "gpt-4-1106-vision-preview"],
+    label: "OpenAI - GPT-4 Vision",
+    supports: {
+      multiturn: true,
+      tools: false,
+      media: true,
+      systemRole: true,
+      output: ["text"]
+    }
}, + configSchema: OpenAiConfigSchema +}); +const gpt4 = (0, import_model.modelRef)({ + name: "openai/gpt-4", + info: { + versions: ["gpt-4", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613"], + label: "OpenAI - GPT-4", + supports: { + multiturn: true, + tools: true, + media: false, + systemRole: true, + output: ["text"] + } + }, + configSchema: OpenAiConfigSchema +}); +const gpt35Turbo = (0, import_model.modelRef)({ + name: "openai/gpt-3.5-turbo", + info: { + versions: ["gpt-3.5-turbo-0125", "gpt-3.5-turbo", "gpt-3.5-turbo-1106"], + label: "OpenAI - GPT-3.5 Turbo", + supports: { + multiturn: true, + tools: true, + media: false, + systemRole: true, + output: ["json", "text"] + } + }, + configSchema: OpenAiConfigSchema +}); +const SUPPORTED_GPT_MODELS = { + "gpt-4o": gpt4o, + "gpt-4o-mini": gpt4oMini, + "gpt-4-turbo": gpt4Turbo, + "gpt-4-vision": gpt4Vision, + "gpt-4": gpt4, + "gpt-3.5-turbo": gpt35Turbo +}; +function toOpenAIRole(role) { + switch (role) { + case "user": + return "user"; + case "model": + return "assistant"; + case "system": + return "system"; + case "tool": + return "tool"; + default: + throw new Error(`role ${role} doesn't map to an OpenAI role.`); + } +} +function toOpenAiTool(tool) { + return { + type: "function", + function: { + name: tool.name, + parameters: tool.inputSchema + } + }; +} +function toOpenAiTextAndMedia(part, visualDetailLevel) { + if (part.text) { + return { + type: "text", + text: part.text + }; + } else if (part.media) { + return { + type: "image_url", + image_url: { + url: part.media.url, + detail: visualDetailLevel + } + }; + } + throw Error( + `Unsupported genkit part fields encountered for current message role: ${JSON.stringify(part)}.` + ); +} +function toOpenAiMessages(messages, visualDetailLevel = "auto") { + const openAiMsgs = []; + for (const message of messages) { + const msg = new import_ai.Message(message); + const role = toOpenAIRole(message.role); + switch (role) { + case "user": + openAiMsgs.push({ + role, + content: msg.content.map( + (part) => toOpenAiTextAndMedia(part, visualDetailLevel) + ) + }); + break; + case "system": + openAiMsgs.push({ + role, + content: msg.text() + }); + break; + case "assistant": { + const toolCalls = msg.content.filter( + (part) => Boolean(part.toolRequest) + ).map((part) => { + var _a; + return { + id: (_a = part.toolRequest.ref) != null ? _a : "", + type: "function", + function: { + name: part.toolRequest.name, + arguments: JSON.stringify(part.toolRequest.input) + } + }; + }); + if (toolCalls.length > 0) { + openAiMsgs.push({ + role, + tool_calls: toolCalls + }); + } else { + openAiMsgs.push({ + role, + content: msg.text() + }); + } + break; + } + case "tool": { + const toolResponseParts = msg.toolResponseParts(); + toolResponseParts.map((part) => { + var _a; + openAiMsgs.push({ + role, + tool_call_id: (_a = part.toolResponse.ref) != null ? _a : "", + content: typeof part.toolResponse.output === "string" ? part.toolResponse.output : JSON.stringify(part.toolResponse.output) + }); + }); + break; + } + } + } + return openAiMsgs; +} +const finishReasonMap = { + length: "length", + stop: "stop", + tool_calls: "stop", + content_filter: "blocked" +}; +function fromOpenAiToolCall(toolCall) { + if (!toolCall.function) { + throw Error( + `Unexpected openAI chunk choice. tool_calls was provided but one or more tool_calls is missing.` + ); + } + const f = toolCall.function; + return { + toolRequest: { + name: f.name, + ref: toolCall.id, + input: f.arguments ? 
JSON.parse(f.arguments) : f.arguments + } + }; +} +function fromOpenAiChoice(choice, jsonMode = false) { + var _a; + const toolRequestParts = (_a = choice.message.tool_calls) == null ? void 0 : _a.map(fromOpenAiToolCall); + return { + index: choice.index, + finishReason: finishReasonMap[choice.finish_reason] || "other", + message: { + role: "model", + content: toolRequestParts && toolRequestParts.length > 0 ? ( + // Note: Not sure why I have to cast here exactly. + // Otherwise it thinks toolRequest must be 'undefined' if provided + toolRequestParts + ) : [ + jsonMode ? { data: JSON.parse(choice.message.content) } : { text: choice.message.content } + ] + }, + custom: {} + }; +} +function fromOpenAiChunkChoice(choice, jsonMode = false) { + var _a; + const toolRequestParts = (_a = choice.delta.tool_calls) == null ? void 0 : _a.map(fromOpenAiToolCall); + return { + index: choice.index, + finishReason: choice.finish_reason ? finishReasonMap[choice.finish_reason] || "other" : "unknown", + message: { + role: "model", + content: toolRequestParts ? ( + // Note: Not sure why I have to cast here exactly. + // Otherwise it thinks toolRequest must be 'undefined' if provided + toolRequestParts + ) : [ + jsonMode ? { data: JSON.parse(choice.delta.content) } : { text: choice.delta.content } + ] + }, + custom: {} + }; +} +function toOpenAiRequestBody(modelName, request) { + var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s; + const model = SUPPORTED_GPT_MODELS[modelName]; + if (!model) + throw new Error(`Unsupported model: ${modelName}`); + const openAiMessages = toOpenAiMessages( + request.messages, + (_a = request.config) == null ? void 0 : _a.visualDetailLevel + ); + const mappedModelName = ((_b = request.config) == null ? void 0 : _b.version) || model.version || modelName; + const body = { + model: mappedModelName, + messages: openAiMessages, + temperature: (_c = request.config) == null ? void 0 : _c.temperature, + max_tokens: (_d = request.config) == null ? void 0 : _d.maxOutputTokens, + top_p: (_e = request.config) == null ? void 0 : _e.topP, + stop: (_f = request.config) == null ? void 0 : _f.stopSequences, + frequency_penalty: (_g = request.config) == null ? void 0 : _g.frequencyPenalty, + logit_bias: (_h = request.config) == null ? void 0 : _h.logitBias, + logprobs: (_i = request.config) == null ? void 0 : _i.logProbs, + // logprobs not snake case! + presence_penalty: (_j = request.config) == null ? void 0 : _j.presencePenalty, + seed: (_k = request.config) == null ? void 0 : _k.seed, + top_logprobs: (_l = request.config) == null ? void 0 : _l.topLogProbs, + // logprobs not snake case! + user: (_m = request.config) == null ? void 0 : _m.user, + tools: (_n = request.tools) == null ? void 0 : _n.map(toOpenAiTool), + n: request.candidates + }; + const response_format = (_o = request.output) == null ? void 0 : _o.format; + if (response_format && MODELS_SUPPORTING_OPENAI_RESPONSE_FORMAT.includes(mappedModelName)) { + if (response_format === "json" && ((_q = (_p = model.info.supports) == null ? void 0 : _p.output) == null ? void 0 : _q.includes("json"))) { + body.response_format = { + type: "json_object" + }; + } else if (response_format === "text" && ((_s = (_r = model.info.supports) == null ? void 0 : _r.output) == null ? 
void 0 : _s.includes("text"))) { + body.response_format = { + type: "text" + }; + } else { + throw new Error( + `${response_format} format is not supported for GPT models currently` + ); + } + } + for (const key in body) { + if (!body[key] || Array.isArray(body[key]) && !body[key].length) + delete body[key]; + } + return body; +} +function gptRunner(name, client) { + return (request, streamingCallback) => __async(this, null, function* () { + var _a, _b, _c, _d; + let response; + const body = toOpenAiRequestBody(name, request); + if (streamingCallback) { + const stream = client.beta.chat.completions.stream(__spreadProps(__spreadValues({}, body), { + stream: true + })); + try { + for (var iter = __forAwait(stream), more, temp, error; more = !(temp = yield iter.next()).done; more = false) { + const chunk = temp.value; + (_a = chunk.choices) == null ? void 0 : _a.forEach((chunk2) => { + const c = fromOpenAiChunkChoice(chunk2); + streamingCallback({ + index: c.index, + content: c.message.content + }); + }); + } + } catch (temp) { + error = [temp]; + } finally { + try { + more && (temp = iter.return) && (yield temp.call(iter)); + } finally { + if (error) + throw error[0]; + } + } + response = yield stream.finalChatCompletion(); + } else { + response = yield client.chat.completions.create(body); + } + return { + candidates: response.choices.map( + (c) => { + var _a2; + return fromOpenAiChoice(c, ((_a2 = request.output) == null ? void 0 : _a2.format) === "json"); + } + ), + usage: { + inputTokens: (_b = response.usage) == null ? void 0 : _b.prompt_tokens, + outputTokens: (_c = response.usage) == null ? void 0 : _c.completion_tokens, + totalTokens: (_d = response.usage) == null ? void 0 : _d.total_tokens + }, + custom: response + }; + }); +} +function gptModel(name, client) { + const modelId = `openai/${name}`; + const model = SUPPORTED_GPT_MODELS[name]; + if (!model) + throw new Error(`Unsupported model: ${name}`); + return (0, import_model.defineModel)( + __spreadProps(__spreadValues({ + name: modelId + }, model.info), { + configSchema: model.configSchema + }), + gptRunner(name, client) + ); +} +// Annotate the CommonJS export names for ESM import in node: +0 && (module.exports = { + OpenAiConfigSchema, + SUPPORTED_GPT_MODELS, + fromOpenAiChoice, + fromOpenAiChunkChoice, + fromOpenAiToolCall, + gpt35Turbo, + gpt4, + gpt4Turbo, + gpt4Vision, + gpt4o, + gpt4oMini, + gptModel, + gptRunner, + toOpenAIRole, + toOpenAiMessages, + toOpenAiRequestBody, + toOpenAiTextAndMedia +}); +//# sourceMappingURL=gpt.js.map \ No newline at end of file diff --git a/plugins/openai/lib/gpt.js.map b/plugins/openai/lib/gpt.js.map new file mode 100644 index 00000000..ea4b36a3 --- /dev/null +++ b/plugins/openai/lib/gpt.js.map @@ -0,0 +1 @@ +{"version":3,"sources":["../src/gpt.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Message } from '@genkit-ai/ai';\nimport {\n GenerateResponseChunkData,\n GenerateResponseData,\n 
GenerationCommonConfigSchema,\n ModelAction,\n defineModel,\n modelRef,\n type CandidateData,\n type GenerateRequest,\n type MessageData,\n type Part,\n type Role,\n type ToolDefinition,\n type ToolRequestPart,\n} from '@genkit-ai/ai/model';\nimport { StreamingCallback } from '@genkit-ai/core';\nimport OpenAI from 'openai';\nimport {\n type ChatCompletion,\n type ChatCompletionChunk,\n type ChatCompletionContentPart,\n type ChatCompletionCreateParamsNonStreaming,\n type ChatCompletionMessageParam,\n type ChatCompletionMessageToolCall,\n type ChatCompletionRole,\n type ChatCompletionTool,\n type CompletionChoice,\n} from 'openai/resources/index.mjs';\nimport z from 'zod';\n\nconst MODELS_SUPPORTING_OPENAI_RESPONSE_FORMAT = [\n 'gpt-4o',\n 'gpt-4o-2024-05-13',\n 'gpt-4o-mini',\n 'gpt-4o-mini-2024-07-18',\n 'gpt-4-turbo',\n 'gpt-4-turbo-2024-04-09',\n 'gpt-4-turbo-preview',\n 'gpt-4-0125-preview',\n 'gpt-4-1106-preview',\n 'gpt-3.5-turbo-0125',\n 'gpt-3.5-turbo',\n 'gpt-3.5-turbo-1106',\n];\n\nexport const OpenAiConfigSchema = GenerationCommonConfigSchema.extend({\n frequencyPenalty: z.number().min(-2).max(2).optional(),\n logitBias: z.record(z.string(), z.number().min(-100).max(100)).optional(),\n logProbs: z.boolean().optional(),\n presencePenalty: z.number().min(-2).max(2).optional(),\n seed: z.number().int().optional(),\n topLogProbs: z.number().int().min(0).max(20).optional(),\n user: z.string().optional(),\n visualDetailLevel: z.enum(['auto', 'low', 'high']).optional(),\n});\n\ntype VisualDetailLevel = z.infer<\n typeof OpenAiConfigSchema\n>['visualDetailLevel'];\n\nexport const gpt4o = modelRef({\n name: 'openai/gpt-4o',\n info: {\n versions: ['gpt-4o', 'gpt-4o-2024-05-13'],\n label: 'OpenAI - GPT-4o',\n supports: {\n multiturn: true,\n tools: true,\n media: true,\n systemRole: true,\n output: ['text', 'json'],\n },\n },\n configSchema: OpenAiConfigSchema,\n});\n\nexport const gpt4oMini = modelRef({\n name: 'openai/gpt-4o-mini',\n info: {\n versions: ['gpt-4o-mini', 'gpt-4o-mini-2024-07-18'],\n label: 'OpenAI - GPT-4o mini',\n supports: {\n multiturn: true,\n tools: true,\n media: true,\n systemRole: true,\n output: ['text', 'json'],\n },\n },\n configSchema: OpenAiConfigSchema,\n});\n\nexport const gpt4Turbo = modelRef({\n name: 'openai/gpt-4-turbo',\n info: {\n versions: [\n 'gpt-4-turbo',\n 'gpt-4-turbo-2024-04-09',\n 'gpt-4-turbo-preview',\n 'gpt-4-0125-preview',\n 'gpt-4-1106-preview',\n ],\n label: 'OpenAI - GPT-4 Turbo',\n supports: {\n multiturn: true,\n tools: true,\n media: true,\n systemRole: true,\n output: ['text', 'json'],\n },\n },\n configSchema: OpenAiConfigSchema,\n});\n\nexport const gpt4Vision = modelRef({\n name: 'openai/gpt-4-vision',\n info: {\n versions: ['gpt-4-vision-preview', 'gpt-4-1106-vision-preview'],\n label: 'OpenAI - GPT-4 Vision',\n supports: {\n multiturn: true,\n tools: false,\n media: true,\n systemRole: true,\n output: ['text'],\n },\n },\n configSchema: OpenAiConfigSchema,\n});\n\nexport const gpt4 = modelRef({\n name: 'openai/gpt-4',\n info: {\n versions: ['gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-32k-0613'],\n label: 'OpenAI - GPT-4',\n supports: {\n multiturn: true,\n tools: true,\n media: false,\n systemRole: true,\n output: ['text'],\n },\n },\n configSchema: OpenAiConfigSchema,\n});\n\nexport const gpt35Turbo = modelRef({\n name: 'openai/gpt-3.5-turbo',\n info: {\n versions: ['gpt-3.5-turbo-0125', 'gpt-3.5-turbo', 'gpt-3.5-turbo-1106'],\n label: 'OpenAI - GPT-3.5 Turbo',\n supports: {\n multiturn: true,\n tools: true,\n media: false,\n 
systemRole: true,\n output: ['json', 'text'],\n },\n },\n configSchema: OpenAiConfigSchema,\n});\n\nexport const SUPPORTED_GPT_MODELS = {\n 'gpt-4o': gpt4o,\n 'gpt-4o-mini': gpt4oMini,\n 'gpt-4-turbo': gpt4Turbo,\n 'gpt-4-vision': gpt4Vision,\n 'gpt-4': gpt4,\n 'gpt-3.5-turbo': gpt35Turbo,\n};\n\nexport function toOpenAIRole(role: Role): ChatCompletionRole {\n switch (role) {\n case 'user':\n return 'user';\n case 'model':\n return 'assistant';\n case 'system':\n return 'system';\n case 'tool':\n return 'tool';\n default:\n throw new Error(`role ${role} doesn't map to an OpenAI role.`);\n }\n}\n\n/**\n * Converts a Genkit ToolDefinition to an OpenAI ChatCompletionTool object.\n * @param tool The Genkit ToolDefinition to convert.\n * @returns The converted OpenAI ChatCompletionTool object.\n */\nfunction toOpenAiTool(tool: ToolDefinition): ChatCompletionTool {\n return {\n type: 'function',\n function: {\n name: tool.name,\n parameters: tool.inputSchema,\n },\n };\n}\n\n/**\n * Converts a Genkit Part to the corresponding OpenAI ChatCompletionContentPart.\n * @param part The Genkit Part to convert.\n * @param visualDetailLevel The visual detail level to use for media parts.\n * @returns The corresponding OpenAI ChatCompletionContentPart.\n * @throws Error if the part contains unsupported fields for the current message role.\n */\nexport function toOpenAiTextAndMedia(\n part: Part,\n visualDetailLevel: VisualDetailLevel\n): ChatCompletionContentPart {\n if (part.text) {\n return {\n type: 'text',\n text: part.text,\n };\n } else if (part.media) {\n return {\n type: 'image_url',\n image_url: {\n url: part.media.url,\n detail: visualDetailLevel,\n },\n };\n }\n throw Error(\n `Unsupported genkit part fields encountered for current message role: ${JSON.stringify(part)}.`\n );\n}\n\n/**\n * Converts a Genkit MessageData array to an OpenAI ChatCompletionMessageParam array.\n * @param messages The Genkit MessageData array to convert.\n * @param visualDetailLevel The visual detail level to use for media parts.\n * @returns The converted OpenAI ChatCompletionMessageParam array.\n */\nexport function toOpenAiMessages(\n messages: MessageData[],\n visualDetailLevel: VisualDetailLevel = 'auto'\n): ChatCompletionMessageParam[] {\n const openAiMsgs: ChatCompletionMessageParam[] = [];\n for (const message of messages) {\n const msg = new Message(message);\n const role = toOpenAIRole(message.role);\n switch (role) {\n case 'user':\n openAiMsgs.push({\n role: role,\n content: msg.content.map((part) =>\n toOpenAiTextAndMedia(part, visualDetailLevel)\n ),\n });\n break;\n case 'system':\n openAiMsgs.push({\n role: role,\n content: msg.text(),\n });\n break;\n case 'assistant': {\n const toolCalls: ChatCompletionMessageToolCall[] = msg.content\n .filter(\n (\n part\n ): part is Part & {\n toolRequest: NonNullable;\n } => Boolean(part.toolRequest)\n )\n .map((part) => ({\n id: part.toolRequest.ref ?? '',\n type: 'function',\n function: {\n name: part.toolRequest.name,\n arguments: JSON.stringify(part.toolRequest.input),\n },\n }));\n if (toolCalls.length > 0) {\n openAiMsgs.push({\n role: role,\n tool_calls: toolCalls,\n });\n } else {\n openAiMsgs.push({\n role: role,\n content: msg.text(),\n });\n }\n break;\n }\n case 'tool': {\n const toolResponseParts = msg.toolResponseParts();\n toolResponseParts.map((part) => {\n openAiMsgs.push({\n role: role,\n tool_call_id: part.toolResponse.ref ?? '',\n content:\n typeof part.toolResponse.output === 'string'\n ? 
part.toolResponse.output\n : JSON.stringify(part.toolResponse.output),\n });\n });\n break;\n }\n }\n }\n return openAiMsgs;\n}\n\nconst finishReasonMap: Record<\n // OpenAI Node SDK doesn't support tool_call in the enum, but it is returned from the API\n CompletionChoice['finish_reason'] | 'tool_calls',\n CandidateData['finishReason']\n> = {\n length: 'length',\n stop: 'stop',\n tool_calls: 'stop',\n content_filter: 'blocked',\n};\n\n/**\n * Converts an OpenAI tool call to a Genkit ToolRequestPart.\n * @param toolCall The OpenAI tool call to convert.\n * @returns The converted Genkit ToolRequestPart.\n */\nexport function fromOpenAiToolCall(\n toolCall:\n | ChatCompletionMessageToolCall\n | ChatCompletionChunk.Choice.Delta.ToolCall\n): ToolRequestPart {\n if (!toolCall.function) {\n throw Error(\n `Unexpected openAI chunk choice. tool_calls was provided but one or more tool_calls is missing.`\n );\n }\n const f = toolCall.function;\n return {\n toolRequest: {\n name: f.name!,\n ref: toolCall.id,\n input: f.arguments ? JSON.parse(f.arguments) : f.arguments,\n },\n };\n}\n\n/**\n * Converts an OpenAI message event to a Genkit CandidateData object.\n * @param choice The OpenAI message event to convert.\n * @param jsonMode Whether the event is a JSON response.\n * @returns The converted Genkit CandidateData object.\n */\nexport function fromOpenAiChoice(\n choice: ChatCompletion.Choice,\n jsonMode = false\n): CandidateData {\n const toolRequestParts = choice.message.tool_calls?.map(fromOpenAiToolCall);\n return {\n index: choice.index,\n finishReason: finishReasonMap[choice.finish_reason] || 'other',\n message: {\n role: 'model',\n content: toolRequestParts && toolRequestParts.length > 0\n ? // Note: Not sure why I have to cast here exactly.\n // Otherwise it thinks toolRequest must be 'undefined' if provided\n (toolRequestParts as ToolRequestPart[])\n : [\n jsonMode\n ? { data: JSON.parse(choice.message.content!) }\n : { text: choice.message.content! },\n ],\n },\n custom: {},\n };\n}\n\n/**\n * Converts an OpenAI message stream event to a Genkit CandidateData object.\n * @param choice The OpenAI message stream event to convert.\n * @param jsonMode Whether the event is a JSON response.\n * @returns The converted Genkit CandidateData object.\n */\nexport function fromOpenAiChunkChoice(\n choice: ChatCompletionChunk.Choice,\n jsonMode = false\n): CandidateData {\n const toolRequestParts = choice.delta.tool_calls?.map(fromOpenAiToolCall);\n return {\n index: choice.index,\n finishReason: choice.finish_reason\n ? finishReasonMap[choice.finish_reason] || 'other'\n : 'unknown',\n message: {\n role: 'model',\n content: toolRequestParts\n ? // Note: Not sure why I have to cast here exactly.\n // Otherwise it thinks toolRequest must be 'undefined' if provided\n (toolRequestParts as ToolRequestPart[])\n : [\n jsonMode\n ? { data: JSON.parse(choice.delta.content!) }\n : { text: choice.delta.content! 
},\n ],\n },\n custom: {},\n };\n}\n\n/**\n * Converts an OpenAI request to an OpenAI API request body.\n * @param modelName The name of the OpenAI model to use.\n * @param request The Genkit GenerateRequest to convert.\n * @returns The converted OpenAI API request body.\n * @throws An error if the specified model is not supported or if an unsupported output format is requested.\n */\nexport function toOpenAiRequestBody(\n modelName: string,\n request: GenerateRequest\n) {\n const model = SUPPORTED_GPT_MODELS[modelName];\n if (!model) throw new Error(`Unsupported model: ${modelName}`);\n const openAiMessages = toOpenAiMessages(\n request.messages,\n request.config?.visualDetailLevel\n );\n const mappedModelName = request.config?.version || model.version || modelName;\n const body = {\n model: mappedModelName,\n messages: openAiMessages,\n temperature: request.config?.temperature,\n max_tokens: request.config?.maxOutputTokens,\n top_p: request.config?.topP,\n stop: request.config?.stopSequences,\n frequency_penalty: request.config?.frequencyPenalty,\n logit_bias: request.config?.logitBias,\n logprobs: request.config?.logProbs, // logprobs not snake case!\n presence_penalty: request.config?.presencePenalty,\n seed: request.config?.seed,\n top_logprobs: request.config?.topLogProbs, // logprobs not snake case!\n user: request.config?.user,\n tools: request.tools?.map(toOpenAiTool),\n n: request.candidates,\n } as ChatCompletionCreateParamsNonStreaming;\n\n const response_format = request.output?.format;\n if (\n response_format &&\n MODELS_SUPPORTING_OPENAI_RESPONSE_FORMAT.includes(mappedModelName)\n ) {\n if (\n response_format === 'json' &&\n model.info.supports?.output?.includes('json')\n ) {\n body.response_format = {\n type: 'json_object',\n };\n } else if (\n response_format === 'text' &&\n model.info.supports?.output?.includes('text')\n ) {\n body.response_format = {\n type: 'text',\n };\n } else {\n throw new Error(\n `${response_format} format is not supported for GPT models currently`\n );\n }\n }\n for (const key in body) {\n if (!body[key] || (Array.isArray(body[key]) && !body[key].length))\n delete body[key];\n }\n return body;\n}\n\n/**\n * Creates the runner used by Genkit to interact with the GPT model.\n * @param name The name of the GPT model.\n * @param client The OpenAI client instance.\n * @returns The runner that Genkit will call when the model is invoked.\n */\nexport function gptRunner(name: string, client: OpenAI) {\n return async (\n request: GenerateRequest,\n streamingCallback?: StreamingCallback\n ): Promise => {\n let response: ChatCompletion;\n const body = toOpenAiRequestBody(name, request);\n if (streamingCallback) {\n const stream = client.beta.chat.completions.stream({\n ...body,\n stream: true,\n });\n for await (const chunk of stream) {\n chunk.choices?.forEach((chunk) => {\n const c = fromOpenAiChunkChoice(chunk);\n streamingCallback({\n index: c.index,\n content: c.message.content,\n });\n });\n }\n response = await stream.finalChatCompletion();\n } else {\n response = await client.chat.completions.create(body);\n }\n return {\n candidates: response.choices.map((c) =>\n fromOpenAiChoice(c, request.output?.format === 'json')\n ),\n usage: {\n inputTokens: response.usage?.prompt_tokens,\n outputTokens: response.usage?.completion_tokens,\n totalTokens: response.usage?.total_tokens,\n },\n custom: response,\n };\n };\n}\n\n/**\n * Defines a GPT model with the given name and OpenAI client.\n * @param name The name of the GPT model.\n * @param client The OpenAI 
client instance.\n * @returns The defined GPT model.\n * @throws An error if the specified model is not supported.\n */\nexport function gptModel(\n name: string,\n client: OpenAI\n): ModelAction {\n const modelId = `openai/${name}`;\n const model = SUPPORTED_GPT_MODELS[name];\n if (!model) throw new Error(`Unsupported model: ${name}`);\n\n return defineModel(\n {\n name: modelId,\n ...model.info,\n configSchema: model.configSchema,\n },\n gptRunner(name, client)\n );\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAgBA,gBAAwB;AACxB,mBAcO;AAcP,iBAAc;AAEd,MAAM,2CAA2C;AAAA,EAC/C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAEO,MAAM,qBAAqB,0CAA6B,OAAO;AAAA,EACpE,kBAAkB,WAAAA,QAAE,OAAO,EAAE,IAAI,EAAE,EAAE,IAAI,CAAC,EAAE,SAAS;AAAA,EACrD,WAAW,WAAAA,QAAE,OAAO,WAAAA,QAAE,OAAO,GAAG,WAAAA,QAAE,OAAO,EAAE,IAAI,IAAI,EAAE,IAAI,GAAG,CAAC,EAAE,SAAS;AAAA,EACxE,UAAU,WAAAA,QAAE,QAAQ,EAAE,SAAS;AAAA,EAC/B,iBAAiB,WAAAA,QAAE,OAAO,EAAE,IAAI,EAAE,EAAE,IAAI,CAAC,EAAE,SAAS;AAAA,EACpD,MAAM,WAAAA,QAAE,OAAO,EAAE,IAAI,EAAE,SAAS;AAAA,EAChC,aAAa,WAAAA,QAAE,OAAO,EAAE,IAAI,EAAE,IAAI,CAAC,EAAE,IAAI,EAAE,EAAE,SAAS;AAAA,EACtD,MAAM,WAAAA,QAAE,OAAO,EAAE,SAAS;AAAA,EAC1B,mBAAmB,WAAAA,QAAE,KAAK,CAAC,QAAQ,OAAO,MAAM,CAAC,EAAE,SAAS;AAC9D,CAAC;AAMM,MAAM,YAAQ,uBAAS;AAAA,EAC5B,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,UAAU,CAAC,UAAU,mBAAmB;AAAA,IACxC,OAAO;AAAA,IACP,UAAU;AAAA,MACR,WAAW;AAAA,MACX,OAAO;AAAA,MACP,OAAO;AAAA,MACP,YAAY;AAAA,MACZ,QAAQ,CAAC,QAAQ,MAAM;AAAA,IACzB;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAEM,MAAM,gBAAY,uBAAS;AAAA,EAChC,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,UAAU,CAAC,eAAe,wBAAwB;AAAA,IAClD,OAAO;AAAA,IACP,UAAU;AAAA,MACR,WAAW;AAAA,MACX,OAAO;AAAA,MACP,OAAO;AAAA,MACP,YAAY;AAAA,MACZ,QAAQ,CAAC,QAAQ,MAAM;AAAA,IACzB;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAEM,MAAM,gBAAY,uBAAS;AAAA,EAChC,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,UAAU;AAAA,MACR;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,OAAO;AAAA,IACP,UAAU;AAAA,MACR,WAAW;AAAA,MACX,OAAO;AAAA,MACP,OAAO;AAAA,MACP,YAAY;AAAA,MACZ,QAAQ,CAAC,QAAQ,MAAM;AAAA,IACzB;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAEM,MAAM,iBAAa,uBAAS;AAAA,EACjC,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,UAAU,CAAC,wBAAwB,2BAA2B;AAAA,IAC9D,OAAO;AAAA,IACP,UAAU;AAAA,MACR,WAAW;AAAA,MACX,OAAO;AAAA,MACP,OAAO;AAAA,MACP,YAAY;AAAA,MACZ,QAAQ,CAAC,MAAM;AAAA,IACjB;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAEM,MAAM,WAAO,uBAAS;AAAA,EAC3B,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,UAAU,CAAC,SAAS,cAAc,aAAa,gBAAgB;AAAA,IAC/D,OAAO;AAAA,IACP,UAAU;AAAA,MACR,WAAW;AAAA,MACX,OAAO;AAAA,MACP,OAAO;AAAA,MACP,YAAY;AAAA,MACZ,QAAQ,CAAC,MAAM;AAAA,IACjB;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAEM,MAAM,iBAAa,uBAAS;AAAA,EACjC,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,UAAU,CAAC,sBAAsB,iBAAiB,oBAAoB;AAAA,IACtE,OAAO;AAAA,IACP,UAAU;AAAA,MACR,WAAW;AAAA,MACX,OAAO;AAAA,MACP,OAAO;AAAA,MACP,YAAY;AAAA,MACZ,QAAQ,CAAC,QAAQ,MAAM;AAAA,IACzB;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAEM,MAAM,uBAAuB;AAAA,EAClC,UAAU;AAAA,EACV,eAAe;AAAA,EACf,eAAe;AAAA,EACf,gBAAgB;AAAA,EAChB,SAAS;AAAA,EACT,iBAAiB;AACnB;AAEO,SAAS,aAAa,MAAgC;AAC3D,UAAQ,MAAM;AAAA,IACZ,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT;AACE,YAAM,IAAI,MAAM,QAAQ,IAAI,iCAAiC;AAAA,EACjE;AACF;AAOA,SAAS,aAAa,MAA0C;AAC9D,SAAO;AAAA,IACL,MAAM;AAAA,IACN,UAAU;AAAA,MACR,MAAM,KAAK;AAAA,MACX,YAAY,KAAK;AAAA,IACnB;AAAA,EACF;AACF;AASO,SAAS,qBACd,MACA,mBAC2B;AAC3B,MAAI,KAAK,MAAM;AACb,WAAO;AAAA,MACL,MAAM;AAAA,MACN,MAAM,KAAK;AAAA,IACb;A
AAA,EACF,WAAW,KAAK,OAAO;AACrB,WAAO;AAAA,MACL,MAAM;AAAA,MACN,WAAW;AAAA,QACT,KAAK,KAAK,MAAM;AAAA,QAChB,QAAQ;AAAA,MACV;AAAA,IACF;AAAA,EACF;AACA,QAAM;AAAA,IACJ,wEAAwE,KAAK,UAAU,IAAI,CAAC;AAAA,EAC9F;AACF;AAQO,SAAS,iBACd,UACA,oBAAuC,QACT;AAC9B,QAAM,aAA2C,CAAC;AAClD,aAAW,WAAW,UAAU;AAC9B,UAAM,MAAM,IAAI,kBAAQ,OAAO;AAC/B,UAAM,OAAO,aAAa,QAAQ,IAAI;AACtC,YAAQ,MAAM;AAAA,MACZ,KAAK;AACH,mBAAW,KAAK;AAAA,UACd;AAAA,UACA,SAAS,IAAI,QAAQ;AAAA,YAAI,CAAC,SACxB,qBAAqB,MAAM,iBAAiB;AAAA,UAC9C;AAAA,QACF,CAAC;AACD;AAAA,MACF,KAAK;AACH,mBAAW,KAAK;AAAA,UACd;AAAA,UACA,SAAS,IAAI,KAAK;AAAA,QACpB,CAAC;AACD;AAAA,MACF,KAAK,aAAa;AAChB,cAAM,YAA6C,IAAI,QACpD;AAAA,UACC,CACE,SAGG,QAAQ,KAAK,WAAW;AAAA,QAC/B,EACC,IAAI,CAAC,SAAM;AA9RtB;AA8R0B;AAAA,YACd,KAAI,UAAK,YAAY,QAAjB,YAAwB;AAAA,YAC5B,MAAM;AAAA,YACN,UAAU;AAAA,cACR,MAAM,KAAK,YAAY;AAAA,cACvB,WAAW,KAAK,UAAU,KAAK,YAAY,KAAK;AAAA,YAClD;AAAA,UACF;AAAA,SAAE;AACJ,YAAI,UAAU,SAAS,GAAG;AACxB,qBAAW,KAAK;AAAA,YACd;AAAA,YACA,YAAY;AAAA,UACd,CAAC;AAAA,QACH,OAAO;AACL,qBAAW,KAAK;AAAA,YACd;AAAA,YACA,SAAS,IAAI,KAAK;AAAA,UACpB,CAAC;AAAA,QACH;AACA;AAAA,MACF;AAAA,MACA,KAAK,QAAQ;AACX,cAAM,oBAAoB,IAAI,kBAAkB;AAChD,0BAAkB,IAAI,CAAC,SAAS;AArTxC;AAsTU,qBAAW,KAAK;AAAA,YACd;AAAA,YACA,eAAc,UAAK,aAAa,QAAlB,YAAyB;AAAA,YACvC,SACE,OAAO,KAAK,aAAa,WAAW,WAChC,KAAK,aAAa,SAClB,KAAK,UAAU,KAAK,aAAa,MAAM;AAAA,UAC/C,CAAC;AAAA,QACH,CAAC;AACD;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACA,SAAO;AACT;AAEA,MAAM,kBAIF;AAAA,EACF,QAAQ;AAAA,EACR,MAAM;AAAA,EACN,YAAY;AAAA,EACZ,gBAAgB;AAClB;AAOO,SAAS,mBACd,UAGiB;AACjB,MAAI,CAAC,SAAS,UAAU;AACtB,UAAM;AAAA,MACJ;AAAA,IACF;AAAA,EACF;AACA,QAAM,IAAI,SAAS;AACnB,SAAO;AAAA,IACL,aAAa;AAAA,MACX,MAAM,EAAE;AAAA,MACR,KAAK,SAAS;AAAA,MACd,OAAO,EAAE,YAAY,KAAK,MAAM,EAAE,SAAS,IAAI,EAAE;AAAA,IACnD;AAAA,EACF;AACF;AAQO,SAAS,iBACd,QACA,WAAW,OACI;AAnXjB;AAoXE,QAAM,oBAAmB,YAAO,QAAQ,eAAf,mBAA2B,IAAI;AACxD,SAAO;AAAA,IACL,OAAO,OAAO;AAAA,IACd,cAAc,gBAAgB,OAAO,aAAa,KAAK;AAAA,IACvD,SAAS;AAAA,MACP,MAAM;AAAA,MACN,SAAS,oBAAoB,iBAAiB,SAAS;AAAA;AAAA;AAAA,QAGlD;AAAA,UACD;AAAA,QACE,WACI,EAAE,MAAM,KAAK,MAAM,OAAO,QAAQ,OAAQ,EAAE,IAC5C,EAAE,MAAM,OAAO,QAAQ,QAAS;AAAA,MACtC;AAAA,IACN;AAAA,IACA,QAAQ,CAAC;AAAA,EACX;AACF;AAQO,SAAS,sBACd,QACA,WAAW,OACI;AAjZjB;AAkZE,QAAM,oBAAmB,YAAO,MAAM,eAAb,mBAAyB,IAAI;AACtD,SAAO;AAAA,IACL,OAAO,OAAO;AAAA,IACd,cAAc,OAAO,gBACjB,gBAAgB,OAAO,aAAa,KAAK,UACzC;AAAA,IACJ,SAAS;AAAA,MACP,MAAM;AAAA,MACN,SAAS;AAAA;AAAA;AAAA,QAGJ;AAAA,UACD;AAAA,QACE,WACI,EAAE,MAAM,KAAK,MAAM,OAAO,MAAM,OAAQ,EAAE,IAC1C,EAAE,MAAM,OAAO,MAAM,QAAS;AAAA,MACpC;AAAA,IACN;AAAA,IACA,QAAQ,CAAC;AAAA,EACX;AACF;AASO,SAAS,oBACd,WACA,SACA;AAlbF;AAmbE,QAAM,QAAQ,qBAAqB,SAAS;AAC5C,MAAI,CAAC;AAAO,UAAM,IAAI,MAAM,sBAAsB,SAAS,EAAE;AAC7D,QAAM,iBAAiB;AAAA,IACrB,QAAQ;AAAA,KACR,aAAQ,WAAR,mBAAgB;AAAA,EAClB;AACA,QAAM,oBAAkB,aAAQ,WAAR,mBAAgB,YAAW,MAAM,WAAW;AACpE,QAAM,OAAO;AAAA,IACX,OAAO;AAAA,IACP,UAAU;AAAA,IACV,cAAa,aAAQ,WAAR,mBAAgB;AAAA,IAC7B,aAAY,aAAQ,WAAR,mBAAgB;AAAA,IAC5B,QAAO,aAAQ,WAAR,mBAAgB;AAAA,IACvB,OAAM,aAAQ,WAAR,mBAAgB;AAAA,IACtB,oBAAmB,aAAQ,WAAR,mBAAgB;AAAA,IACnC,aAAY,aAAQ,WAAR,mBAAgB;AAAA,IAC5B,WAAU,aAAQ,WAAR,mBAAgB;AAAA;AAAA,IAC1B,mBAAkB,aAAQ,WAAR,mBAAgB;AAAA,IAClC,OAAM,aAAQ,WAAR,mBAAgB;AAAA,IACtB,eAAc,aAAQ,WAAR,mBAAgB;AAAA;AAAA,IAC9B,OAAM,aAAQ,WAAR,mBAAgB;AAAA,IACtB,QAAO,aAAQ,UAAR,mBAAe,IAAI;AAAA,IAC1B,GAAG,QAAQ;AAAA,EACb;AAEA,QAAM,mBAAkB,aAAQ,WAAR,mBAAgB;AACxC,MACE,mBACA,yCAAyC,SAAS,eAAe,GACjE;AACA,QACE,oBAAoB,YACpB,iBAAM,KAAK,aAAX,mBAAqB,WAArB,mBAA6B,SAAS,UACtC;AACA,WAAK,kBAAkB;AAAA,QACrB,MAAM;AAAA,MACR;AAAA,IACF,WACE,oBAAoB,YACpB,iBAAM,KAAK,aAAX,mBAAqB,WAArB,mBAA6B,SAAS,UACtC;AACA,WAAK,kBAAkB;AAAA,QACrB,MAAM;AAAA,MACR;AAAA,IACF,OAAO;AACL,YAAM,IAAI;AAAA,QACR
,GAAG,eAAe;AAAA,MACpB;AAAA,IACF;AAAA,EACF;AACA,aAAW,OAAO,MAAM;AACtB,QAAI,CAAC,KAAK,GAAG,KAAM,MAAM,QAAQ,KAAK,GAAG,CAAC,KAAK,CAAC,KAAK,GAAG,EAAE;AACxD,aAAO,KAAK,GAAG;AAAA,EACnB;AACA,SAAO;AACT;AAQO,SAAS,UAAU,MAAc,QAAgB;AACtD,SAAO,CACL,SACA,sBACkC;AAtftC;AAufI,QAAI;AACJ,UAAM,OAAO,oBAAoB,MAAM,OAAO;AAC9C,QAAI,mBAAmB;AACrB,YAAM,SAAS,OAAO,KAAK,KAAK,YAAY,OAAO,iCAC9C,OAD8C;AAAA,QAEjD,QAAQ;AAAA,MACV,EAAC;AACD;AAAA,mCAA0B,SAA1B,0EAAkC;AAAvB,gBAAM,QAAjB;AACE,sBAAM,YAAN,mBAAe,QAAQ,CAACC,WAAU;AAChC,kBAAM,IAAI,sBAAsBA,MAAK;AACrC,8BAAkB;AAAA,cAChB,OAAO,EAAE;AAAA,cACT,SAAS,EAAE,QAAQ;AAAA,YACrB,CAAC;AAAA,UACH;AAAA,QACF;AAAA,eARA,MA9fN;AA8fM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AASA,iBAAW,MAAM,OAAO,oBAAoB;AAAA,IAC9C,OAAO;AACL,iBAAW,MAAM,OAAO,KAAK,YAAY,OAAO,IAAI;AAAA,IACtD;AACA,WAAO;AAAA,MACL,YAAY,SAAS,QAAQ;AAAA,QAAI,CAAC,MAAG;AA5gB3C,cAAAC;AA6gBQ,kCAAiB,KAAGA,MAAA,QAAQ,WAAR,gBAAAA,IAAgB,YAAW,MAAM;AAAA;AAAA,MACvD;AAAA,MACA,OAAO;AAAA,QACL,cAAa,cAAS,UAAT,mBAAgB;AAAA,QAC7B,eAAc,cAAS,UAAT,mBAAgB;AAAA,QAC9B,cAAa,cAAS,UAAT,mBAAgB;AAAA,MAC/B;AAAA,MACA,QAAQ;AAAA,IACV;AAAA,EACF;AACF;AASO,SAAS,SACd,MACA,QACwC;AACxC,QAAM,UAAU,UAAU,IAAI;AAC9B,QAAM,QAAQ,qBAAqB,IAAI;AACvC,MAAI,CAAC;AAAO,UAAM,IAAI,MAAM,sBAAsB,IAAI,EAAE;AAExD,aAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,OACH,MAAM,OAFX;AAAA,MAGE,cAAc,MAAM;AAAA,IACtB;AAAA,IACA,UAAU,MAAM,MAAM;AAAA,EACxB;AACF;","names":["z","chunk","_a"]} \ No newline at end of file diff --git a/plugins/openai/lib/gpt.mjs b/plugins/openai/lib/gpt.mjs new file mode 100644 index 00000000..51e37b6f --- /dev/null +++ b/plugins/openai/lib/gpt.mjs @@ -0,0 +1,440 @@ +import { + __async, + __forAwait, + __spreadProps, + __spreadValues +} from "./chunk-WFI2LP4G.mjs"; +import { Message } from "@genkit-ai/ai"; +import { + GenerationCommonConfigSchema, + defineModel, + modelRef +} from "@genkit-ai/ai/model"; +import z from "zod"; +const MODELS_SUPPORTING_OPENAI_RESPONSE_FORMAT = [ + "gpt-4o", + "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-turbo-preview", + "gpt-4-0125-preview", + "gpt-4-1106-preview", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo", + "gpt-3.5-turbo-1106" +]; +const OpenAiConfigSchema = GenerationCommonConfigSchema.extend({ + frequencyPenalty: z.number().min(-2).max(2).optional(), + logitBias: z.record(z.string(), z.number().min(-100).max(100)).optional(), + logProbs: z.boolean().optional(), + presencePenalty: z.number().min(-2).max(2).optional(), + seed: z.number().int().optional(), + topLogProbs: z.number().int().min(0).max(20).optional(), + user: z.string().optional(), + visualDetailLevel: z.enum(["auto", "low", "high"]).optional() +}); +const gpt4o = modelRef({ + name: "openai/gpt-4o", + info: { + versions: ["gpt-4o", "gpt-4o-2024-05-13"], + label: "OpenAI - GPT-4o", + supports: { + multiturn: true, + tools: true, + media: true, + systemRole: true, + output: ["text", "json"] + } + }, + configSchema: OpenAiConfigSchema +}); +const gpt4oMini = modelRef({ + name: "openai/gpt-4o-mini", + info: { + versions: ["gpt-4o-mini", "gpt-4o-mini-2024-07-18"], + label: "OpenAI - GPT-4o mini", + supports: { + multiturn: true, + tools: true, + media: true, + systemRole: true, + output: ["text", "json"] + } + }, + configSchema: OpenAiConfigSchema +}); +const gpt4Turbo = modelRef({ + name: "openai/gpt-4-turbo", + info: { + versions: [ + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-turbo-preview", + "gpt-4-0125-preview", + "gpt-4-1106-preview" + ], + label: "OpenAI - GPT-4 Turbo", + supports: { + multiturn: true, + tools: true, + media: 
true, + systemRole: true, + output: ["text", "json"] + } + }, + configSchema: OpenAiConfigSchema +}); +const gpt4Vision = modelRef({ + name: "openai/gpt-4-vision", + info: { + versions: ["gpt-4-vision-preview", "gpt-4-1106-vision-preview"], + label: "OpenAI - GPT-4 Vision", + supports: { + multiturn: true, + tools: false, + media: true, + systemRole: true, + output: ["text"] + } + }, + configSchema: OpenAiConfigSchema +}); +const gpt4 = modelRef({ + name: "openai/gpt-4", + info: { + versions: ["gpt-4", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613"], + label: "OpenAI - GPT-4", + supports: { + multiturn: true, + tools: true, + media: false, + systemRole: true, + output: ["text"] + } + }, + configSchema: OpenAiConfigSchema +}); +const gpt35Turbo = modelRef({ + name: "openai/gpt-3.5-turbo", + info: { + versions: ["gpt-3.5-turbo-0125", "gpt-3.5-turbo", "gpt-3.5-turbo-1106"], + label: "OpenAI - GPT-3.5 Turbo", + supports: { + multiturn: true, + tools: true, + media: false, + systemRole: true, + output: ["json", "text"] + } + }, + configSchema: OpenAiConfigSchema +}); +const SUPPORTED_GPT_MODELS = { + "gpt-4o": gpt4o, + "gpt-4o-mini": gpt4oMini, + "gpt-4-turbo": gpt4Turbo, + "gpt-4-vision": gpt4Vision, + "gpt-4": gpt4, + "gpt-3.5-turbo": gpt35Turbo +}; +function toOpenAIRole(role) { + switch (role) { + case "user": + return "user"; + case "model": + return "assistant"; + case "system": + return "system"; + case "tool": + return "tool"; + default: + throw new Error(`role ${role} doesn't map to an OpenAI role.`); + } +} +function toOpenAiTool(tool) { + return { + type: "function", + function: { + name: tool.name, + parameters: tool.inputSchema + } + }; +} +function toOpenAiTextAndMedia(part, visualDetailLevel) { + if (part.text) { + return { + type: "text", + text: part.text + }; + } else if (part.media) { + return { + type: "image_url", + image_url: { + url: part.media.url, + detail: visualDetailLevel + } + }; + } + throw Error( + `Unsupported genkit part fields encountered for current message role: ${JSON.stringify(part)}.` + ); +} +function toOpenAiMessages(messages, visualDetailLevel = "auto") { + const openAiMsgs = []; + for (const message of messages) { + const msg = new Message(message); + const role = toOpenAIRole(message.role); + switch (role) { + case "user": + openAiMsgs.push({ + role, + content: msg.content.map( + (part) => toOpenAiTextAndMedia(part, visualDetailLevel) + ) + }); + break; + case "system": + openAiMsgs.push({ + role, + content: msg.text() + }); + break; + case "assistant": { + const toolCalls = msg.content.filter( + (part) => Boolean(part.toolRequest) + ).map((part) => { + var _a; + return { + id: (_a = part.toolRequest.ref) != null ? _a : "", + type: "function", + function: { + name: part.toolRequest.name, + arguments: JSON.stringify(part.toolRequest.input) + } + }; + }); + if (toolCalls.length > 0) { + openAiMsgs.push({ + role, + tool_calls: toolCalls + }); + } else { + openAiMsgs.push({ + role, + content: msg.text() + }); + } + break; + } + case "tool": { + const toolResponseParts = msg.toolResponseParts(); + toolResponseParts.map((part) => { + var _a; + openAiMsgs.push({ + role, + tool_call_id: (_a = part.toolResponse.ref) != null ? _a : "", + content: typeof part.toolResponse.output === "string" ? 
part.toolResponse.output : JSON.stringify(part.toolResponse.output) + }); + }); + break; + } + } + } + return openAiMsgs; +} +const finishReasonMap = { + length: "length", + stop: "stop", + tool_calls: "stop", + content_filter: "blocked" +}; +function fromOpenAiToolCall(toolCall) { + if (!toolCall.function) { + throw Error( + `Unexpected openAI chunk choice. tool_calls was provided but one or more tool_calls is missing.` + ); + } + const f = toolCall.function; + return { + toolRequest: { + name: f.name, + ref: toolCall.id, + input: f.arguments ? JSON.parse(f.arguments) : f.arguments + } + }; +} +function fromOpenAiChoice(choice, jsonMode = false) { + var _a; + const toolRequestParts = (_a = choice.message.tool_calls) == null ? void 0 : _a.map(fromOpenAiToolCall); + return { + index: choice.index, + finishReason: finishReasonMap[choice.finish_reason] || "other", + message: { + role: "model", + content: toolRequestParts && toolRequestParts.length > 0 ? ( + // Note: Not sure why I have to cast here exactly. + // Otherwise it thinks toolRequest must be 'undefined' if provided + toolRequestParts + ) : [ + jsonMode ? { data: JSON.parse(choice.message.content) } : { text: choice.message.content } + ] + }, + custom: {} + }; +} +function fromOpenAiChunkChoice(choice, jsonMode = false) { + var _a; + const toolRequestParts = (_a = choice.delta.tool_calls) == null ? void 0 : _a.map(fromOpenAiToolCall); + return { + index: choice.index, + finishReason: choice.finish_reason ? finishReasonMap[choice.finish_reason] || "other" : "unknown", + message: { + role: "model", + content: toolRequestParts ? ( + // Note: Not sure why I have to cast here exactly. + // Otherwise it thinks toolRequest must be 'undefined' if provided + toolRequestParts + ) : [ + jsonMode ? { data: JSON.parse(choice.delta.content) } : { text: choice.delta.content } + ] + }, + custom: {} + }; +} +function toOpenAiRequestBody(modelName, request) { + var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s; + const model = SUPPORTED_GPT_MODELS[modelName]; + if (!model) + throw new Error(`Unsupported model: ${modelName}`); + const openAiMessages = toOpenAiMessages( + request.messages, + (_a = request.config) == null ? void 0 : _a.visualDetailLevel + ); + const mappedModelName = ((_b = request.config) == null ? void 0 : _b.version) || model.version || modelName; + const body = { + model: mappedModelName, + messages: openAiMessages, + temperature: (_c = request.config) == null ? void 0 : _c.temperature, + max_tokens: (_d = request.config) == null ? void 0 : _d.maxOutputTokens, + top_p: (_e = request.config) == null ? void 0 : _e.topP, + stop: (_f = request.config) == null ? void 0 : _f.stopSequences, + frequency_penalty: (_g = request.config) == null ? void 0 : _g.frequencyPenalty, + logit_bias: (_h = request.config) == null ? void 0 : _h.logitBias, + logprobs: (_i = request.config) == null ? void 0 : _i.logProbs, + // logprobs not snake case! + presence_penalty: (_j = request.config) == null ? void 0 : _j.presencePenalty, + seed: (_k = request.config) == null ? void 0 : _k.seed, + top_logprobs: (_l = request.config) == null ? void 0 : _l.topLogProbs, + // logprobs not snake case! + user: (_m = request.config) == null ? void 0 : _m.user, + tools: (_n = request.tools) == null ? void 0 : _n.map(toOpenAiTool), + n: request.candidates + }; + const response_format = (_o = request.output) == null ? 
void 0 : _o.format; + if (response_format && MODELS_SUPPORTING_OPENAI_RESPONSE_FORMAT.includes(mappedModelName)) { + if (response_format === "json" && ((_q = (_p = model.info.supports) == null ? void 0 : _p.output) == null ? void 0 : _q.includes("json"))) { + body.response_format = { + type: "json_object" + }; + } else if (response_format === "text" && ((_s = (_r = model.info.supports) == null ? void 0 : _r.output) == null ? void 0 : _s.includes("text"))) { + body.response_format = { + type: "text" + }; + } else { + throw new Error( + `${response_format} format is not supported for GPT models currently` + ); + } + } + for (const key in body) { + if (!body[key] || Array.isArray(body[key]) && !body[key].length) + delete body[key]; + } + return body; +} +function gptRunner(name, client) { + return (request, streamingCallback) => __async(this, null, function* () { + var _a, _b, _c, _d; + let response; + const body = toOpenAiRequestBody(name, request); + if (streamingCallback) { + const stream = client.beta.chat.completions.stream(__spreadProps(__spreadValues({}, body), { + stream: true + })); + try { + for (var iter = __forAwait(stream), more, temp, error; more = !(temp = yield iter.next()).done; more = false) { + const chunk = temp.value; + (_a = chunk.choices) == null ? void 0 : _a.forEach((chunk2) => { + const c = fromOpenAiChunkChoice(chunk2); + streamingCallback({ + index: c.index, + content: c.message.content + }); + }); + } + } catch (temp) { + error = [temp]; + } finally { + try { + more && (temp = iter.return) && (yield temp.call(iter)); + } finally { + if (error) + throw error[0]; + } + } + response = yield stream.finalChatCompletion(); + } else { + response = yield client.chat.completions.create(body); + } + return { + candidates: response.choices.map( + (c) => { + var _a2; + return fromOpenAiChoice(c, ((_a2 = request.output) == null ? void 0 : _a2.format) === "json"); + } + ), + usage: { + inputTokens: (_b = response.usage) == null ? void 0 : _b.prompt_tokens, + outputTokens: (_c = response.usage) == null ? void 0 : _c.completion_tokens, + totalTokens: (_d = response.usage) == null ? 
void 0 : _d.total_tokens + }, + custom: response + }; + }); +} +function gptModel(name, client) { + const modelId = `openai/${name}`; + const model = SUPPORTED_GPT_MODELS[name]; + if (!model) + throw new Error(`Unsupported model: ${name}`); + return defineModel( + __spreadProps(__spreadValues({ + name: modelId + }, model.info), { + configSchema: model.configSchema + }), + gptRunner(name, client) + ); +} +export { + OpenAiConfigSchema, + SUPPORTED_GPT_MODELS, + fromOpenAiChoice, + fromOpenAiChunkChoice, + fromOpenAiToolCall, + gpt35Turbo, + gpt4, + gpt4Turbo, + gpt4Vision, + gpt4o, + gpt4oMini, + gptModel, + gptRunner, + toOpenAIRole, + toOpenAiMessages, + toOpenAiRequestBody, + toOpenAiTextAndMedia +}; +//# sourceMappingURL=gpt.mjs.map \ No newline at end of file diff --git a/plugins/openai/lib/gpt.mjs.map b/plugins/openai/lib/gpt.mjs.map new file mode 100644 index 00000000..1c442693 --- /dev/null +++ b/plugins/openai/lib/gpt.mjs.map @@ -0,0 +1 @@ +{"version":3,"sources":["../src/gpt.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Message } from '@genkit-ai/ai';\nimport {\n GenerateResponseChunkData,\n GenerateResponseData,\n GenerationCommonConfigSchema,\n ModelAction,\n defineModel,\n modelRef,\n type CandidateData,\n type GenerateRequest,\n type MessageData,\n type Part,\n type Role,\n type ToolDefinition,\n type ToolRequestPart,\n} from '@genkit-ai/ai/model';\nimport { StreamingCallback } from '@genkit-ai/core';\nimport OpenAI from 'openai';\nimport {\n type ChatCompletion,\n type ChatCompletionChunk,\n type ChatCompletionContentPart,\n type ChatCompletionCreateParamsNonStreaming,\n type ChatCompletionMessageParam,\n type ChatCompletionMessageToolCall,\n type ChatCompletionRole,\n type ChatCompletionTool,\n type CompletionChoice,\n} from 'openai/resources/index.mjs';\nimport z from 'zod';\n\nconst MODELS_SUPPORTING_OPENAI_RESPONSE_FORMAT = [\n 'gpt-4o',\n 'gpt-4o-2024-05-13',\n 'gpt-4o-mini',\n 'gpt-4o-mini-2024-07-18',\n 'gpt-4-turbo',\n 'gpt-4-turbo-2024-04-09',\n 'gpt-4-turbo-preview',\n 'gpt-4-0125-preview',\n 'gpt-4-1106-preview',\n 'gpt-3.5-turbo-0125',\n 'gpt-3.5-turbo',\n 'gpt-3.5-turbo-1106',\n];\n\nexport const OpenAiConfigSchema = GenerationCommonConfigSchema.extend({\n frequencyPenalty: z.number().min(-2).max(2).optional(),\n logitBias: z.record(z.string(), z.number().min(-100).max(100)).optional(),\n logProbs: z.boolean().optional(),\n presencePenalty: z.number().min(-2).max(2).optional(),\n seed: z.number().int().optional(),\n topLogProbs: z.number().int().min(0).max(20).optional(),\n user: z.string().optional(),\n visualDetailLevel: z.enum(['auto', 'low', 'high']).optional(),\n});\n\ntype VisualDetailLevel = z.infer<\n typeof OpenAiConfigSchema\n>['visualDetailLevel'];\n\nexport const gpt4o = modelRef({\n name: 'openai/gpt-4o',\n info: {\n versions: ['gpt-4o', 'gpt-4o-2024-05-13'],\n label: 'OpenAI - GPT-4o',\n supports: {\n multiturn: true,\n tools: true,\n media: true,\n 
systemRole: true,\n output: ['text', 'json'],\n },\n },\n configSchema: OpenAiConfigSchema,\n});\n\nexport const gpt4oMini = modelRef({\n name: 'openai/gpt-4o-mini',\n info: {\n versions: ['gpt-4o-mini', 'gpt-4o-mini-2024-07-18'],\n label: 'OpenAI - GPT-4o mini',\n supports: {\n multiturn: true,\n tools: true,\n media: true,\n systemRole: true,\n output: ['text', 'json'],\n },\n },\n configSchema: OpenAiConfigSchema,\n});\n\nexport const gpt4Turbo = modelRef({\n name: 'openai/gpt-4-turbo',\n info: {\n versions: [\n 'gpt-4-turbo',\n 'gpt-4-turbo-2024-04-09',\n 'gpt-4-turbo-preview',\n 'gpt-4-0125-preview',\n 'gpt-4-1106-preview',\n ],\n label: 'OpenAI - GPT-4 Turbo',\n supports: {\n multiturn: true,\n tools: true,\n media: true,\n systemRole: true,\n output: ['text', 'json'],\n },\n },\n configSchema: OpenAiConfigSchema,\n});\n\nexport const gpt4Vision = modelRef({\n name: 'openai/gpt-4-vision',\n info: {\n versions: ['gpt-4-vision-preview', 'gpt-4-1106-vision-preview'],\n label: 'OpenAI - GPT-4 Vision',\n supports: {\n multiturn: true,\n tools: false,\n media: true,\n systemRole: true,\n output: ['text'],\n },\n },\n configSchema: OpenAiConfigSchema,\n});\n\nexport const gpt4 = modelRef({\n name: 'openai/gpt-4',\n info: {\n versions: ['gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-32k-0613'],\n label: 'OpenAI - GPT-4',\n supports: {\n multiturn: true,\n tools: true,\n media: false,\n systemRole: true,\n output: ['text'],\n },\n },\n configSchema: OpenAiConfigSchema,\n});\n\nexport const gpt35Turbo = modelRef({\n name: 'openai/gpt-3.5-turbo',\n info: {\n versions: ['gpt-3.5-turbo-0125', 'gpt-3.5-turbo', 'gpt-3.5-turbo-1106'],\n label: 'OpenAI - GPT-3.5 Turbo',\n supports: {\n multiturn: true,\n tools: true,\n media: false,\n systemRole: true,\n output: ['json', 'text'],\n },\n },\n configSchema: OpenAiConfigSchema,\n});\n\nexport const SUPPORTED_GPT_MODELS = {\n 'gpt-4o': gpt4o,\n 'gpt-4o-mini': gpt4oMini,\n 'gpt-4-turbo': gpt4Turbo,\n 'gpt-4-vision': gpt4Vision,\n 'gpt-4': gpt4,\n 'gpt-3.5-turbo': gpt35Turbo,\n};\n\nexport function toOpenAIRole(role: Role): ChatCompletionRole {\n switch (role) {\n case 'user':\n return 'user';\n case 'model':\n return 'assistant';\n case 'system':\n return 'system';\n case 'tool':\n return 'tool';\n default:\n throw new Error(`role ${role} doesn't map to an OpenAI role.`);\n }\n}\n\n/**\n * Converts a Genkit ToolDefinition to an OpenAI ChatCompletionTool object.\n * @param tool The Genkit ToolDefinition to convert.\n * @returns The converted OpenAI ChatCompletionTool object.\n */\nfunction toOpenAiTool(tool: ToolDefinition): ChatCompletionTool {\n return {\n type: 'function',\n function: {\n name: tool.name,\n parameters: tool.inputSchema,\n },\n };\n}\n\n/**\n * Converts a Genkit Part to the corresponding OpenAI ChatCompletionContentPart.\n * @param part The Genkit Part to convert.\n * @param visualDetailLevel The visual detail level to use for media parts.\n * @returns The corresponding OpenAI ChatCompletionContentPart.\n * @throws Error if the part contains unsupported fields for the current message role.\n */\nexport function toOpenAiTextAndMedia(\n part: Part,\n visualDetailLevel: VisualDetailLevel\n): ChatCompletionContentPart {\n if (part.text) {\n return {\n type: 'text',\n text: part.text,\n };\n } else if (part.media) {\n return {\n type: 'image_url',\n image_url: {\n url: part.media.url,\n detail: visualDetailLevel,\n },\n };\n }\n throw Error(\n `Unsupported genkit part fields encountered for current message role: ${JSON.stringify(part)}.`\n 
);\n}\n\n/**\n * Converts a Genkit MessageData array to an OpenAI ChatCompletionMessageParam array.\n * @param messages The Genkit MessageData array to convert.\n * @param visualDetailLevel The visual detail level to use for media parts.\n * @returns The converted OpenAI ChatCompletionMessageParam array.\n */\nexport function toOpenAiMessages(\n messages: MessageData[],\n visualDetailLevel: VisualDetailLevel = 'auto'\n): ChatCompletionMessageParam[] {\n const openAiMsgs: ChatCompletionMessageParam[] = [];\n for (const message of messages) {\n const msg = new Message(message);\n const role = toOpenAIRole(message.role);\n switch (role) {\n case 'user':\n openAiMsgs.push({\n role: role,\n content: msg.content.map((part) =>\n toOpenAiTextAndMedia(part, visualDetailLevel)\n ),\n });\n break;\n case 'system':\n openAiMsgs.push({\n role: role,\n content: msg.text(),\n });\n break;\n case 'assistant': {\n const toolCalls: ChatCompletionMessageToolCall[] = msg.content\n .filter(\n (\n part\n ): part is Part & {\n toolRequest: NonNullable;\n } => Boolean(part.toolRequest)\n )\n .map((part) => ({\n id: part.toolRequest.ref ?? '',\n type: 'function',\n function: {\n name: part.toolRequest.name,\n arguments: JSON.stringify(part.toolRequest.input),\n },\n }));\n if (toolCalls.length > 0) {\n openAiMsgs.push({\n role: role,\n tool_calls: toolCalls,\n });\n } else {\n openAiMsgs.push({\n role: role,\n content: msg.text(),\n });\n }\n break;\n }\n case 'tool': {\n const toolResponseParts = msg.toolResponseParts();\n toolResponseParts.map((part) => {\n openAiMsgs.push({\n role: role,\n tool_call_id: part.toolResponse.ref ?? '',\n content:\n typeof part.toolResponse.output === 'string'\n ? part.toolResponse.output\n : JSON.stringify(part.toolResponse.output),\n });\n });\n break;\n }\n }\n }\n return openAiMsgs;\n}\n\nconst finishReasonMap: Record<\n // OpenAI Node SDK doesn't support tool_call in the enum, but it is returned from the API\n CompletionChoice['finish_reason'] | 'tool_calls',\n CandidateData['finishReason']\n> = {\n length: 'length',\n stop: 'stop',\n tool_calls: 'stop',\n content_filter: 'blocked',\n};\n\n/**\n * Converts an OpenAI tool call to a Genkit ToolRequestPart.\n * @param toolCall The OpenAI tool call to convert.\n * @returns The converted Genkit ToolRequestPart.\n */\nexport function fromOpenAiToolCall(\n toolCall:\n | ChatCompletionMessageToolCall\n | ChatCompletionChunk.Choice.Delta.ToolCall\n): ToolRequestPart {\n if (!toolCall.function) {\n throw Error(\n `Unexpected openAI chunk choice. tool_calls was provided but one or more tool_calls is missing.`\n );\n }\n const f = toolCall.function;\n return {\n toolRequest: {\n name: f.name!,\n ref: toolCall.id,\n input: f.arguments ? JSON.parse(f.arguments) : f.arguments,\n },\n };\n}\n\n/**\n * Converts an OpenAI message event to a Genkit CandidateData object.\n * @param choice The OpenAI message event to convert.\n * @param jsonMode Whether the event is a JSON response.\n * @returns The converted Genkit CandidateData object.\n */\nexport function fromOpenAiChoice(\n choice: ChatCompletion.Choice,\n jsonMode = false\n): CandidateData {\n const toolRequestParts = choice.message.tool_calls?.map(fromOpenAiToolCall);\n return {\n index: choice.index,\n finishReason: finishReasonMap[choice.finish_reason] || 'other',\n message: {\n role: 'model',\n content: toolRequestParts && toolRequestParts.length > 0\n ? 
// Note: Not sure why I have to cast here exactly.\n // Otherwise it thinks toolRequest must be 'undefined' if provided\n (toolRequestParts as ToolRequestPart[])\n : [\n jsonMode\n ? { data: JSON.parse(choice.message.content!) }\n : { text: choice.message.content! },\n ],\n },\n custom: {},\n };\n}\n\n/**\n * Converts an OpenAI message stream event to a Genkit CandidateData object.\n * @param choice The OpenAI message stream event to convert.\n * @param jsonMode Whether the event is a JSON response.\n * @returns The converted Genkit CandidateData object.\n */\nexport function fromOpenAiChunkChoice(\n choice: ChatCompletionChunk.Choice,\n jsonMode = false\n): CandidateData {\n const toolRequestParts = choice.delta.tool_calls?.map(fromOpenAiToolCall);\n return {\n index: choice.index,\n finishReason: choice.finish_reason\n ? finishReasonMap[choice.finish_reason] || 'other'\n : 'unknown',\n message: {\n role: 'model',\n content: toolRequestParts\n ? // Note: Not sure why I have to cast here exactly.\n // Otherwise it thinks toolRequest must be 'undefined' if provided\n (toolRequestParts as ToolRequestPart[])\n : [\n jsonMode\n ? { data: JSON.parse(choice.delta.content!) }\n : { text: choice.delta.content! },\n ],\n },\n custom: {},\n };\n}\n\n/**\n * Converts an OpenAI request to an OpenAI API request body.\n * @param modelName The name of the OpenAI model to use.\n * @param request The Genkit GenerateRequest to convert.\n * @returns The converted OpenAI API request body.\n * @throws An error if the specified model is not supported or if an unsupported output format is requested.\n */\nexport function toOpenAiRequestBody(\n modelName: string,\n request: GenerateRequest\n) {\n const model = SUPPORTED_GPT_MODELS[modelName];\n if (!model) throw new Error(`Unsupported model: ${modelName}`);\n const openAiMessages = toOpenAiMessages(\n request.messages,\n request.config?.visualDetailLevel\n );\n const mappedModelName = request.config?.version || model.version || modelName;\n const body = {\n model: mappedModelName,\n messages: openAiMessages,\n temperature: request.config?.temperature,\n max_tokens: request.config?.maxOutputTokens,\n top_p: request.config?.topP,\n stop: request.config?.stopSequences,\n frequency_penalty: request.config?.frequencyPenalty,\n logit_bias: request.config?.logitBias,\n logprobs: request.config?.logProbs, // logprobs not snake case!\n presence_penalty: request.config?.presencePenalty,\n seed: request.config?.seed,\n top_logprobs: request.config?.topLogProbs, // logprobs not snake case!\n user: request.config?.user,\n tools: request.tools?.map(toOpenAiTool),\n n: request.candidates,\n } as ChatCompletionCreateParamsNonStreaming;\n\n const response_format = request.output?.format;\n if (\n response_format &&\n MODELS_SUPPORTING_OPENAI_RESPONSE_FORMAT.includes(mappedModelName)\n ) {\n if (\n response_format === 'json' &&\n model.info.supports?.output?.includes('json')\n ) {\n body.response_format = {\n type: 'json_object',\n };\n } else if (\n response_format === 'text' &&\n model.info.supports?.output?.includes('text')\n ) {\n body.response_format = {\n type: 'text',\n };\n } else {\n throw new Error(\n `${response_format} format is not supported for GPT models currently`\n );\n }\n }\n for (const key in body) {\n if (!body[key] || (Array.isArray(body[key]) && !body[key].length))\n delete body[key];\n }\n return body;\n}\n\n/**\n * Creates the runner used by Genkit to interact with the GPT model.\n * @param name The name of the GPT model.\n * @param client The OpenAI 
client instance.\n * @returns The runner that Genkit will call when the model is invoked.\n */\nexport function gptRunner(name: string, client: OpenAI) {\n return async (\n request: GenerateRequest,\n streamingCallback?: StreamingCallback\n ): Promise => {\n let response: ChatCompletion;\n const body = toOpenAiRequestBody(name, request);\n if (streamingCallback) {\n const stream = client.beta.chat.completions.stream({\n ...body,\n stream: true,\n });\n for await (const chunk of stream) {\n chunk.choices?.forEach((chunk) => {\n const c = fromOpenAiChunkChoice(chunk);\n streamingCallback({\n index: c.index,\n content: c.message.content,\n });\n });\n }\n response = await stream.finalChatCompletion();\n } else {\n response = await client.chat.completions.create(body);\n }\n return {\n candidates: response.choices.map((c) =>\n fromOpenAiChoice(c, request.output?.format === 'json')\n ),\n usage: {\n inputTokens: response.usage?.prompt_tokens,\n outputTokens: response.usage?.completion_tokens,\n totalTokens: response.usage?.total_tokens,\n },\n custom: response,\n };\n };\n}\n\n/**\n * Defines a GPT model with the given name and OpenAI client.\n * @param name The name of the GPT model.\n * @param client The OpenAI client instance.\n * @returns The defined GPT model.\n * @throws An error if the specified model is not supported.\n */\nexport function gptModel(\n name: string,\n client: OpenAI\n): ModelAction {\n const modelId = `openai/${name}`;\n const model = SUPPORTED_GPT_MODELS[name];\n if (!model) throw new Error(`Unsupported model: ${name}`);\n\n return defineModel(\n {\n name: modelId,\n ...model.info,\n configSchema: model.configSchema,\n },\n gptRunner(name, client)\n );\n}\n"],"mappings":";;;;;;AAgBA,SAAS,eAAe;AACxB;AAAA,EAGE;AAAA,EAEA;AAAA,EACA;AAAA,OAQK;AAcP,OAAO,OAAO;AAEd,MAAM,2CAA2C;AAAA,EAC/C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAEO,MAAM,qBAAqB,6BAA6B,OAAO;AAAA,EACpE,kBAAkB,EAAE,OAAO,EAAE,IAAI,EAAE,EAAE,IAAI,CAAC,EAAE,SAAS;AAAA,EACrD,WAAW,EAAE,OAAO,EAAE,OAAO,GAAG,EAAE,OAAO,EAAE,IAAI,IAAI,EAAE,IAAI,GAAG,CAAC,EAAE,SAAS;AAAA,EACxE,UAAU,EAAE,QAAQ,EAAE,SAAS;AAAA,EAC/B,iBAAiB,EAAE,OAAO,EAAE,IAAI,EAAE,EAAE,IAAI,CAAC,EAAE,SAAS;AAAA,EACpD,MAAM,EAAE,OAAO,EAAE,IAAI,EAAE,SAAS;AAAA,EAChC,aAAa,EAAE,OAAO,EAAE,IAAI,EAAE,IAAI,CAAC,EAAE,IAAI,EAAE,EAAE,SAAS;AAAA,EACtD,MAAM,EAAE,OAAO,EAAE,SAAS;AAAA,EAC1B,mBAAmB,EAAE,KAAK,CAAC,QAAQ,OAAO,MAAM,CAAC,EAAE,SAAS;AAC9D,CAAC;AAMM,MAAM,QAAQ,SAAS;AAAA,EAC5B,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,UAAU,CAAC,UAAU,mBAAmB;AAAA,IACxC,OAAO;AAAA,IACP,UAAU;AAAA,MACR,WAAW;AAAA,MACX,OAAO;AAAA,MACP,OAAO;AAAA,MACP,YAAY;AAAA,MACZ,QAAQ,CAAC,QAAQ,MAAM;AAAA,IACzB;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAEM,MAAM,YAAY,SAAS;AAAA,EAChC,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,UAAU,CAAC,eAAe,wBAAwB;AAAA,IAClD,OAAO;AAAA,IACP,UAAU;AAAA,MACR,WAAW;AAAA,MACX,OAAO;AAAA,MACP,OAAO;AAAA,MACP,YAAY;AAAA,MACZ,QAAQ,CAAC,QAAQ,MAAM;AAAA,IACzB;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAEM,MAAM,YAAY,SAAS;AAAA,EAChC,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,UAAU;AAAA,MACR;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,OAAO;AAAA,IACP,UAAU;AAAA,MACR,WAAW;AAAA,MACX,OAAO;AAAA,MACP,OAAO;AAAA,MACP,YAAY;AAAA,MACZ,QAAQ,CAAC,QAAQ,MAAM;AAAA,IACzB;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAEM,MAAM,aAAa,SAAS;AAAA,EACjC,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,UAAU,CAAC,wBAAwB,2BAA2B;AAAA,IAC9D,OAAO;AAAA,IACP,UAAU;AAAA,MACR,WAAW;AAAA,MACX,OAAO;AAAA,MACP,OAAO;AAAA,MACP,YAAY;AAAA,MACZ,QAAQ,CAAC,MAAM;AAAA,IACjB;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAEM,MAAM,OAAO,SAAS;AAAA,EAC3B,MAAM;A
AAA,EACN,MAAM;AAAA,IACJ,UAAU,CAAC,SAAS,cAAc,aAAa,gBAAgB;AAAA,IAC/D,OAAO;AAAA,IACP,UAAU;AAAA,MACR,WAAW;AAAA,MACX,OAAO;AAAA,MACP,OAAO;AAAA,MACP,YAAY;AAAA,MACZ,QAAQ,CAAC,MAAM;AAAA,IACjB;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAEM,MAAM,aAAa,SAAS;AAAA,EACjC,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,UAAU,CAAC,sBAAsB,iBAAiB,oBAAoB;AAAA,IACtE,OAAO;AAAA,IACP,UAAU;AAAA,MACR,WAAW;AAAA,MACX,OAAO;AAAA,MACP,OAAO;AAAA,MACP,YAAY;AAAA,MACZ,QAAQ,CAAC,QAAQ,MAAM;AAAA,IACzB;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAEM,MAAM,uBAAuB;AAAA,EAClC,UAAU;AAAA,EACV,eAAe;AAAA,EACf,eAAe;AAAA,EACf,gBAAgB;AAAA,EAChB,SAAS;AAAA,EACT,iBAAiB;AACnB;AAEO,SAAS,aAAa,MAAgC;AAC3D,UAAQ,MAAM;AAAA,IACZ,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT;AACE,YAAM,IAAI,MAAM,QAAQ,IAAI,iCAAiC;AAAA,EACjE;AACF;AAOA,SAAS,aAAa,MAA0C;AAC9D,SAAO;AAAA,IACL,MAAM;AAAA,IACN,UAAU;AAAA,MACR,MAAM,KAAK;AAAA,MACX,YAAY,KAAK;AAAA,IACnB;AAAA,EACF;AACF;AASO,SAAS,qBACd,MACA,mBAC2B;AAC3B,MAAI,KAAK,MAAM;AACb,WAAO;AAAA,MACL,MAAM;AAAA,MACN,MAAM,KAAK;AAAA,IACb;AAAA,EACF,WAAW,KAAK,OAAO;AACrB,WAAO;AAAA,MACL,MAAM;AAAA,MACN,WAAW;AAAA,QACT,KAAK,KAAK,MAAM;AAAA,QAChB,QAAQ;AAAA,MACV;AAAA,IACF;AAAA,EACF;AACA,QAAM;AAAA,IACJ,wEAAwE,KAAK,UAAU,IAAI,CAAC;AAAA,EAC9F;AACF;AAQO,SAAS,iBACd,UACA,oBAAuC,QACT;AAC9B,QAAM,aAA2C,CAAC;AAClD,aAAW,WAAW,UAAU;AAC9B,UAAM,MAAM,IAAI,QAAQ,OAAO;AAC/B,UAAM,OAAO,aAAa,QAAQ,IAAI;AACtC,YAAQ,MAAM;AAAA,MACZ,KAAK;AACH,mBAAW,KAAK;AAAA,UACd;AAAA,UACA,SAAS,IAAI,QAAQ;AAAA,YAAI,CAAC,SACxB,qBAAqB,MAAM,iBAAiB;AAAA,UAC9C;AAAA,QACF,CAAC;AACD;AAAA,MACF,KAAK;AACH,mBAAW,KAAK;AAAA,UACd;AAAA,UACA,SAAS,IAAI,KAAK;AAAA,QACpB,CAAC;AACD;AAAA,MACF,KAAK,aAAa;AAChB,cAAM,YAA6C,IAAI,QACpD;AAAA,UACC,CACE,SAGG,QAAQ,KAAK,WAAW;AAAA,QAC/B,EACC,IAAI,CAAC,SAAM;AA9RtB;AA8R0B;AAAA,YACd,KAAI,UAAK,YAAY,QAAjB,YAAwB;AAAA,YAC5B,MAAM;AAAA,YACN,UAAU;AAAA,cACR,MAAM,KAAK,YAAY;AAAA,cACvB,WAAW,KAAK,UAAU,KAAK,YAAY,KAAK;AAAA,YAClD;AAAA,UACF;AAAA,SAAE;AACJ,YAAI,UAAU,SAAS,GAAG;AACxB,qBAAW,KAAK;AAAA,YACd;AAAA,YACA,YAAY;AAAA,UACd,CAAC;AAAA,QACH,OAAO;AACL,qBAAW,KAAK;AAAA,YACd;AAAA,YACA,SAAS,IAAI,KAAK;AAAA,UACpB,CAAC;AAAA,QACH;AACA;AAAA,MACF;AAAA,MACA,KAAK,QAAQ;AACX,cAAM,oBAAoB,IAAI,kBAAkB;AAChD,0BAAkB,IAAI,CAAC,SAAS;AArTxC;AAsTU,qBAAW,KAAK;AAAA,YACd;AAAA,YACA,eAAc,UAAK,aAAa,QAAlB,YAAyB;AAAA,YACvC,SACE,OAAO,KAAK,aAAa,WAAW,WAChC,KAAK,aAAa,SAClB,KAAK,UAAU,KAAK,aAAa,MAAM;AAAA,UAC/C,CAAC;AAAA,QACH,CAAC;AACD;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACA,SAAO;AACT;AAEA,MAAM,kBAIF;AAAA,EACF,QAAQ;AAAA,EACR,MAAM;AAAA,EACN,YAAY;AAAA,EACZ,gBAAgB;AAClB;AAOO,SAAS,mBACd,UAGiB;AACjB,MAAI,CAAC,SAAS,UAAU;AACtB,UAAM;AAAA,MACJ;AAAA,IACF;AAAA,EACF;AACA,QAAM,IAAI,SAAS;AACnB,SAAO;AAAA,IACL,aAAa;AAAA,MACX,MAAM,EAAE;AAAA,MACR,KAAK,SAAS;AAAA,MACd,OAAO,EAAE,YAAY,KAAK,MAAM,EAAE,SAAS,IAAI,EAAE;AAAA,IACnD;AAAA,EACF;AACF;AAQO,SAAS,iBACd,QACA,WAAW,OACI;AAnXjB;AAoXE,QAAM,oBAAmB,YAAO,QAAQ,eAAf,mBAA2B,IAAI;AACxD,SAAO;AAAA,IACL,OAAO,OAAO;AAAA,IACd,cAAc,gBAAgB,OAAO,aAAa,KAAK;AAAA,IACvD,SAAS;AAAA,MACP,MAAM;AAAA,MACN,SAAS,oBAAoB,iBAAiB,SAAS;AAAA;AAAA;AAAA,QAGlD;AAAA,UACD;AAAA,QACE,WACI,EAAE,MAAM,KAAK,MAAM,OAAO,QAAQ,OAAQ,EAAE,IAC5C,EAAE,MAAM,OAAO,QAAQ,QAAS;AAAA,MACtC;AAAA,IACN;AAAA,IACA,QAAQ,CAAC;AAAA,EACX;AACF;AAQO,SAAS,sBACd,QACA,WAAW,OACI;AAjZjB;AAkZE,QAAM,oBAAmB,YAAO,MAAM,eAAb,mBAAyB,IAAI;AACtD,SAAO;AAAA,IACL,OAAO,OAAO;AAAA,IACd,cAAc,OAAO,gBACjB,gBAAgB,OAAO,aAAa,KAAK,UACzC;AAAA,IACJ,SAAS;AAAA,MACP,MAAM;AAAA,MACN,SAAS;AAAA;AAAA;AAAA,QAGJ;AAAA,UACD;AAAA,QACE,WACI,EAAE,MAAM,KAAK,MAAM,OAAO,MAAM,OAAQ,EAAE,IAC1C,EAAE,MAAM,OAAO,MAAM,QAAS;AAAA,MACpC;AAAA,IACN;AAAA,IACA,QAAQ,CAAC;AAAA,EACX;AACF;AASO,SAAS,oBACd,WACA,SACA;AAlbF;AAmbE,
QAAM,QAAQ,qBAAqB,SAAS;AAC5C,MAAI,CAAC;AAAO,UAAM,IAAI,MAAM,sBAAsB,SAAS,EAAE;AAC7D,QAAM,iBAAiB;AAAA,IACrB,QAAQ;AAAA,KACR,aAAQ,WAAR,mBAAgB;AAAA,EAClB;AACA,QAAM,oBAAkB,aAAQ,WAAR,mBAAgB,YAAW,MAAM,WAAW;AACpE,QAAM,OAAO;AAAA,IACX,OAAO;AAAA,IACP,UAAU;AAAA,IACV,cAAa,aAAQ,WAAR,mBAAgB;AAAA,IAC7B,aAAY,aAAQ,WAAR,mBAAgB;AAAA,IAC5B,QAAO,aAAQ,WAAR,mBAAgB;AAAA,IACvB,OAAM,aAAQ,WAAR,mBAAgB;AAAA,IACtB,oBAAmB,aAAQ,WAAR,mBAAgB;AAAA,IACnC,aAAY,aAAQ,WAAR,mBAAgB;AAAA,IAC5B,WAAU,aAAQ,WAAR,mBAAgB;AAAA;AAAA,IAC1B,mBAAkB,aAAQ,WAAR,mBAAgB;AAAA,IAClC,OAAM,aAAQ,WAAR,mBAAgB;AAAA,IACtB,eAAc,aAAQ,WAAR,mBAAgB;AAAA;AAAA,IAC9B,OAAM,aAAQ,WAAR,mBAAgB;AAAA,IACtB,QAAO,aAAQ,UAAR,mBAAe,IAAI;AAAA,IAC1B,GAAG,QAAQ;AAAA,EACb;AAEA,QAAM,mBAAkB,aAAQ,WAAR,mBAAgB;AACxC,MACE,mBACA,yCAAyC,SAAS,eAAe,GACjE;AACA,QACE,oBAAoB,YACpB,iBAAM,KAAK,aAAX,mBAAqB,WAArB,mBAA6B,SAAS,UACtC;AACA,WAAK,kBAAkB;AAAA,QACrB,MAAM;AAAA,MACR;AAAA,IACF,WACE,oBAAoB,YACpB,iBAAM,KAAK,aAAX,mBAAqB,WAArB,mBAA6B,SAAS,UACtC;AACA,WAAK,kBAAkB;AAAA,QACrB,MAAM;AAAA,MACR;AAAA,IACF,OAAO;AACL,YAAM,IAAI;AAAA,QACR,GAAG,eAAe;AAAA,MACpB;AAAA,IACF;AAAA,EACF;AACA,aAAW,OAAO,MAAM;AACtB,QAAI,CAAC,KAAK,GAAG,KAAM,MAAM,QAAQ,KAAK,GAAG,CAAC,KAAK,CAAC,KAAK,GAAG,EAAE;AACxD,aAAO,KAAK,GAAG;AAAA,EACnB;AACA,SAAO;AACT;AAQO,SAAS,UAAU,MAAc,QAAgB;AACtD,SAAO,CACL,SACA,sBACkC;AAtftC;AAufI,QAAI;AACJ,UAAM,OAAO,oBAAoB,MAAM,OAAO;AAC9C,QAAI,mBAAmB;AACrB,YAAM,SAAS,OAAO,KAAK,KAAK,YAAY,OAAO,iCAC9C,OAD8C;AAAA,QAEjD,QAAQ;AAAA,MACV,EAAC;AACD;AAAA,mCAA0B,SAA1B,0EAAkC;AAAvB,gBAAM,QAAjB;AACE,sBAAM,YAAN,mBAAe,QAAQ,CAACA,WAAU;AAChC,kBAAM,IAAI,sBAAsBA,MAAK;AACrC,8BAAkB;AAAA,cAChB,OAAO,EAAE;AAAA,cACT,SAAS,EAAE,QAAQ;AAAA,YACrB,CAAC;AAAA,UACH;AAAA,QACF;AAAA,eARA,MA9fN;AA8fM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AASA,iBAAW,MAAM,OAAO,oBAAoB;AAAA,IAC9C,OAAO;AACL,iBAAW,MAAM,OAAO,KAAK,YAAY,OAAO,IAAI;AAAA,IACtD;AACA,WAAO;AAAA,MACL,YAAY,SAAS,QAAQ;AAAA,QAAI,CAAC,MAAG;AA5gB3C,cAAAC;AA6gBQ,kCAAiB,KAAGA,MAAA,QAAQ,WAAR,gBAAAA,IAAgB,YAAW,MAAM;AAAA;AAAA,MACvD;AAAA,MACA,OAAO;AAAA,QACL,cAAa,cAAS,UAAT,mBAAgB;AAAA,QAC7B,eAAc,cAAS,UAAT,mBAAgB;AAAA,QAC9B,cAAa,cAAS,UAAT,mBAAgB;AAAA,MAC/B;AAAA,MACA,QAAQ;AAAA,IACV;AAAA,EACF;AACF;AASO,SAAS,SACd,MACA,QACwC;AACxC,QAAM,UAAU,UAAU,IAAI;AAC9B,QAAM,QAAQ,qBAAqB,IAAI;AACvC,MAAI,CAAC;AAAO,UAAM,IAAI,MAAM,sBAAsB,IAAI,EAAE;AAExD,SAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,OACH,MAAM,OAFX;AAAA,MAGE,cAAc,MAAM;AAAA,IACtB;AAAA,IACA,UAAU,MAAM,MAAM;AAAA,EACxB;AACF;","names":["chunk","_a"]} \ No newline at end of file diff --git a/plugins/openai/lib/index.d.mts b/plugins/openai/lib/index.d.mts new file mode 100644 index 00000000..13470a00 --- /dev/null +++ b/plugins/openai/lib/index.d.mts @@ -0,0 +1,11 @@ +import '@genkit-ai/core'; +export { dallE3 } from './dalle.mjs'; +export { P as PluginOptions, o as default, o as openAI, t as textEmbedding3Large, a as textEmbedding3Small, b as textEmbeddingAda002 } from './embedder-DZYwphxr.mjs'; +export { gpt35Turbo, gpt4, gpt4Turbo, gpt4Vision, gpt4o, gpt4oMini } from './gpt.mjs'; +export { tts1, tts1Hd } from './tts.mjs'; +export { whisper1 } from './whisper.mjs'; +import '@genkit-ai/ai/model'; +import 'openai'; +import 'zod'; +import '@genkit-ai/ai/embedder'; +import 'openai/resources/index.mjs'; diff --git a/plugins/openai/lib/index.d.ts b/plugins/openai/lib/index.d.ts new file mode 100644 index 00000000..c700a269 --- /dev/null +++ b/plugins/openai/lib/index.d.ts @@ -0,0 +1,11 @@ +import '@genkit-ai/core'; +export { dallE3 } from './dalle.js'; +export { P as PluginOptions, o as default, o as openAI, t as textEmbedding3Large, a as textEmbedding3Small, b as textEmbeddingAda002 } from 
'./embedder-DTnK2FJN.js'; +export { gpt35Turbo, gpt4, gpt4Turbo, gpt4Vision, gpt4o, gpt4oMini } from './gpt.js'; +export { tts1, tts1Hd } from './tts.js'; +export { whisper1 } from './whisper.js'; +import '@genkit-ai/ai/model'; +import 'openai'; +import 'zod'; +import '@genkit-ai/ai/embedder'; +import 'openai/resources/index.mjs'; diff --git a/plugins/openai/lib/index.js b/plugins/openai/lib/index.js new file mode 100644 index 00000000..acca4a9b --- /dev/null +++ b/plugins/openai/lib/index.js @@ -0,0 +1,119 @@ +"use strict"; +var __create = Object.create; +var __defProp = Object.defineProperty; +var __getOwnPropDesc = Object.getOwnPropertyDescriptor; +var __getOwnPropNames = Object.getOwnPropertyNames; +var __getProtoOf = Object.getPrototypeOf; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); +}; +var __copyProps = (to, from, except, desc) => { + if (from && typeof from === "object" || typeof from === "function") { + for (let key of __getOwnPropNames(from)) + if (!__hasOwnProp.call(to, key) && key !== except) + __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); + } + return to; +}; +var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps( + // If the importer is in node compatibility mode or this is not an ESM + // file that has been converted to a CommonJS file using a Babel- + // compatible transform (i.e. "__esModule" has not been set), then set + // "default" to the CommonJS "module.exports" for node compatibility. + isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target, + mod +)); +var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); +var __async = (__this, __arguments, generator) => { + return new Promise((resolve, reject) => { + var fulfilled = (value) => { + try { + step(generator.next(value)); + } catch (e) { + reject(e); + } + }; + var rejected = (value) => { + try { + step(generator.throw(value)); + } catch (e) { + reject(e); + } + }; + var step = (x) => x.done ? 
resolve(x.value) : Promise.resolve(x.value).then(fulfilled, rejected); + step((generator = generator.apply(__this, __arguments)).next()); + }); +}; +var src_exports = {}; +__export(src_exports, { + dallE3: () => import_dalle.dallE3, + default: () => src_default, + gpt35Turbo: () => import_gpt.gpt35Turbo, + gpt4: () => import_gpt.gpt4, + gpt4Turbo: () => import_gpt.gpt4Turbo, + gpt4Vision: () => import_gpt.gpt4Vision, + gpt4o: () => import_gpt.gpt4o, + gpt4oMini: () => import_gpt.gpt4oMini, + openAI: () => openAI, + textEmbedding3Large: () => import_embedder.textEmbedding3Large, + textEmbedding3Small: () => import_embedder.textEmbedding3Small, + textEmbeddingAda002: () => import_embedder.textEmbeddingAda002, + tts1: () => import_tts.tts1, + tts1Hd: () => import_tts.tts1Hd, + whisper1: () => import_whisper.whisper1 +}); +module.exports = __toCommonJS(src_exports); +var import_core = require("@genkit-ai/core"); +var import_openai = __toESM(require("openai")); +var import_dalle = require("./dalle.js"); +var import_embedder = require("./embedder.js"); +var import_gpt = require("./gpt.js"); +var import_tts = require("./tts.js"); +var import_whisper = require("./whisper.js"); +const openAI = (0, import_core.genkitPlugin)( + "openai", + (options) => __async(void 0, null, function* () { + let apiKey = (options == null ? void 0 : options.apiKey) || process.env.OPENAI_API_KEY; + if (!apiKey) + throw new Error( + "please pass in the API key or set the OPENAI_API_KEY environment variable" + ); + const client = new import_openai.default({ apiKey }); + return { + models: [ + ...Object.keys(import_gpt.SUPPORTED_GPT_MODELS).map( + (name) => (0, import_gpt.gptModel)(name, client) + ), + ...Object.keys(import_tts.SUPPORTED_TTS_MODELS).map( + (name) => (0, import_tts.ttsModel)(name, client) + ), + (0, import_dalle.dallE3Model)(client), + (0, import_whisper.whisper1Model)(client) + ], + embedders: Object.keys(import_embedder.SUPPORTED_EMBEDDING_MODELS).map( + (name) => (0, import_embedder.openaiEmbedder)(name, options) + ) + }; + }) +); +var src_default = openAI; +// Annotate the CommonJS export names for ESM import in node: +0 && (module.exports = { + dallE3, + gpt35Turbo, + gpt4, + gpt4Turbo, + gpt4Vision, + gpt4o, + gpt4oMini, + openAI, + textEmbedding3Large, + textEmbedding3Small, + textEmbeddingAda002, + tts1, + tts1Hd, + whisper1 +}); +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/plugins/openai/lib/index.js.map b/plugins/openai/lib/index.js.map new file mode 100644 index 00000000..97634bd8 --- /dev/null +++ b/plugins/openai/lib/index.js.map @@ -0,0 +1 @@ +{"version":3,"sources":["../src/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { genkitPlugin, type Plugin } from '@genkit-ai/core';\nimport OpenAI from 'openai';\nimport { dallE3, dallE3Model } from './dalle.js';\nimport {\n openaiEmbedder,\n SUPPORTED_EMBEDDING_MODELS,\n textEmbedding3Large,\n textEmbedding3Small,\n 
textEmbeddingAda002,\n} from './embedder.js';\nimport {\n gpt35Turbo,\n gpt4,\n gpt4Turbo,\n gpt4Vision,\n gpt4o,\n gpt4oMini,\n gptModel,\n SUPPORTED_GPT_MODELS,\n} from './gpt.js';\nimport { SUPPORTED_TTS_MODELS, ttsModel, tts1, tts1Hd } from './tts.js';\nimport { whisper1, whisper1Model } from './whisper.js';\nexport {\n dallE3,\n gpt35Turbo,\n gpt4,\n gpt4Turbo,\n gpt4Vision,\n gpt4o,\n gpt4oMini,\n tts1,\n tts1Hd,\n whisper1,\n textEmbedding3Large,\n textEmbedding3Small,\n textEmbeddingAda002,\n};\n\nexport interface PluginOptions {\n apiKey?: string;\n}\n\n/**\n * This module provides an interface to the OpenAI models through the Genkit\n * plugin system. It allows users to interact with various models by providing\n * an API key and optional configuration.\n *\n * The main export is the `openai` plugin, which can be configured with an API\n * key either directly or through environment variables. It initializes the\n * OpenAI client and makes available the models for use.\n *\n * Exports:\n * - gpt4o: Reference to the GPT-4o model.\n * - gpt4oMini: Reference to the GPT-4o-mini model.\n * - gpt4Turbo: Reference to the GPT-4 Turbo model.\n * - gpt4Vision: Reference to the GPT-4 Vision model.\n * - gpt4: Reference to the GPT-4 model.\n * - gpt35Turbo: Reference to the GPT-3.5 Turbo model.\n * - dallE3: Reference to the DALL-E 3 model.\n * - tts1: Reference to the Text-to-speech 1 model.\n * - tts1Hd: Reference to the Text-to-speech 1 HD model.\n * - whisper: Reference to the Whisper model.\n * - textEmbedding3Large: Reference to the Text Embedding Large model.\n * - textEmbedding3Small: Reference to the Text Embedding Small model.\n * - textEmbeddingAda002: Reference to the Ada model.\n * - openai: The main plugin function to interact with OpenAI.\n *\n * Usage:\n * To use the models, initialize the openai plugin inside `configureGenkit` and\n * pass the configuration options. If no API key is provided in the options, the\n * environment variable `OPENAI_API_KEY` must be set.\n *\n * Example:\n * ```\n * import openai from 'genkitx-openai';\n *\n * export default configureGenkit({\n * plugins: [\n * openai({ apiKey: 'your-api-key' })\n * ... 
// other plugins\n * ]\n * });\n * ```\n */\nexport const openAI: Plugin<[PluginOptions] | []> = genkitPlugin(\n 'openai',\n async (options?: PluginOptions) => {\n let apiKey = options?.apiKey || process.env.OPENAI_API_KEY;\n if (!apiKey)\n throw new Error(\n 'please pass in the API key or set the OPENAI_API_KEY environment variable'\n );\n const client = new OpenAI({ apiKey });\n return {\n models: [\n ...Object.keys(SUPPORTED_GPT_MODELS).map((name) =>\n gptModel(name, client)\n ),\n ...Object.keys(SUPPORTED_TTS_MODELS).map((name) =>\n ttsModel(name, client)\n ),\n dallE3Model(client),\n whisper1Model(client),\n ],\n embedders: Object.keys(SUPPORTED_EMBEDDING_MODELS).map((name) =>\n openaiEmbedder(name, options)\n ),\n };\n }\n);\n\nexport default openAI;\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAgBA,kBAA0C;AAC1C,oBAAmB;AACnB,mBAAoC;AACpC,sBAMO;AACP,iBASO;AACP,iBAA6D;AAC7D,qBAAwC;AA+DjC,MAAM,aAAuC;AAAA,EAClD;AAAA,EACA,CAAO,YAA4B;AACjC,QAAI,UAAS,mCAAS,WAAU,QAAQ,IAAI;AAC5C,QAAI,CAAC;AACH,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AACF,UAAM,SAAS,IAAI,cAAAA,QAAO,EAAE,OAAO,CAAC;AACpC,WAAO;AAAA,MACL,QAAQ;AAAA,QACN,GAAG,OAAO,KAAK,+BAAoB,EAAE;AAAA,UAAI,CAAC,aACxC,qBAAS,MAAM,MAAM;AAAA,QACvB;AAAA,QACA,GAAG,OAAO,KAAK,+BAAoB,EAAE;AAAA,UAAI,CAAC,aACxC,qBAAS,MAAM,MAAM;AAAA,QACvB;AAAA,YACA,0BAAY,MAAM;AAAA,YAClB,8BAAc,MAAM;AAAA,MACtB;AAAA,MACA,WAAW,OAAO,KAAK,0CAA0B,EAAE;AAAA,QAAI,CAAC,aACtD,gCAAe,MAAM,OAAO;AAAA,MAC9B;AAAA,IACF;AAAA,EACF;AACF;AAEA,IAAO,cAAQ;","names":["OpenAI"]} \ No newline at end of file diff --git a/plugins/openai/lib/index.mjs b/plugins/openai/lib/index.mjs new file mode 100644 index 00000000..2be9766e --- /dev/null +++ b/plugins/openai/lib/index.mjs @@ -0,0 +1,70 @@ +import { + __async +} from "./chunk-WFI2LP4G.mjs"; +import { genkitPlugin } from "@genkit-ai/core"; +import OpenAI from "openai"; +import { dallE3, dallE3Model } from "./dalle.js"; +import { + openaiEmbedder, + SUPPORTED_EMBEDDING_MODELS, + textEmbedding3Large, + textEmbedding3Small, + textEmbeddingAda002 +} from "./embedder.js"; +import { + gpt35Turbo, + gpt4, + gpt4Turbo, + gpt4Vision, + gpt4o, + gpt4oMini, + gptModel, + SUPPORTED_GPT_MODELS +} from "./gpt.js"; +import { SUPPORTED_TTS_MODELS, ttsModel, tts1, tts1Hd } from "./tts.js"; +import { whisper1, whisper1Model } from "./whisper.js"; +const openAI = genkitPlugin( + "openai", + (options) => __async(void 0, null, function* () { + let apiKey = (options == null ? 
void 0 : options.apiKey) || process.env.OPENAI_API_KEY; + if (!apiKey) + throw new Error( + "please pass in the API key or set the OPENAI_API_KEY environment variable" + ); + const client = new OpenAI({ apiKey }); + return { + models: [ + ...Object.keys(SUPPORTED_GPT_MODELS).map( + (name) => gptModel(name, client) + ), + ...Object.keys(SUPPORTED_TTS_MODELS).map( + (name) => ttsModel(name, client) + ), + dallE3Model(client), + whisper1Model(client) + ], + embedders: Object.keys(SUPPORTED_EMBEDDING_MODELS).map( + (name) => openaiEmbedder(name, options) + ) + }; + }) +); +var src_default = openAI; +export { + dallE3, + src_default as default, + gpt35Turbo, + gpt4, + gpt4Turbo, + gpt4Vision, + gpt4o, + gpt4oMini, + openAI, + textEmbedding3Large, + textEmbedding3Small, + textEmbeddingAda002, + tts1, + tts1Hd, + whisper1 +}; +//# sourceMappingURL=index.mjs.map \ No newline at end of file diff --git a/plugins/openai/lib/index.mjs.map b/plugins/openai/lib/index.mjs.map new file mode 100644 index 00000000..0ff8a879 --- /dev/null +++ b/plugins/openai/lib/index.mjs.map @@ -0,0 +1 @@ +{"version":3,"sources":["../src/index.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { genkitPlugin, type Plugin } from '@genkit-ai/core';\nimport OpenAI from 'openai';\nimport { dallE3, dallE3Model } from './dalle.js';\nimport {\n openaiEmbedder,\n SUPPORTED_EMBEDDING_MODELS,\n textEmbedding3Large,\n textEmbedding3Small,\n textEmbeddingAda002,\n} from './embedder.js';\nimport {\n gpt35Turbo,\n gpt4,\n gpt4Turbo,\n gpt4Vision,\n gpt4o,\n gpt4oMini,\n gptModel,\n SUPPORTED_GPT_MODELS,\n} from './gpt.js';\nimport { SUPPORTED_TTS_MODELS, ttsModel, tts1, tts1Hd } from './tts.js';\nimport { whisper1, whisper1Model } from './whisper.js';\nexport {\n dallE3,\n gpt35Turbo,\n gpt4,\n gpt4Turbo,\n gpt4Vision,\n gpt4o,\n gpt4oMini,\n tts1,\n tts1Hd,\n whisper1,\n textEmbedding3Large,\n textEmbedding3Small,\n textEmbeddingAda002,\n};\n\nexport interface PluginOptions {\n apiKey?: string;\n}\n\n/**\n * This module provides an interface to the OpenAI models through the Genkit\n * plugin system. It allows users to interact with various models by providing\n * an API key and optional configuration.\n *\n * The main export is the `openai` plugin, which can be configured with an API\n * key either directly or through environment variables. 
It initializes the\n * OpenAI client and makes available the models for use.\n *\n * Exports:\n * - gpt4o: Reference to the GPT-4o model.\n * - gpt4oMini: Reference to the GPT-4o-mini model.\n * - gpt4Turbo: Reference to the GPT-4 Turbo model.\n * - gpt4Vision: Reference to the GPT-4 Vision model.\n * - gpt4: Reference to the GPT-4 model.\n * - gpt35Turbo: Reference to the GPT-3.5 Turbo model.\n * - dallE3: Reference to the DALL-E 3 model.\n * - tts1: Reference to the Text-to-speech 1 model.\n * - tts1Hd: Reference to the Text-to-speech 1 HD model.\n * - whisper: Reference to the Whisper model.\n * - textEmbedding3Large: Reference to the Text Embedding Large model.\n * - textEmbedding3Small: Reference to the Text Embedding Small model.\n * - textEmbeddingAda002: Reference to the Ada model.\n * - openai: The main plugin function to interact with OpenAI.\n *\n * Usage:\n * To use the models, initialize the openai plugin inside `configureGenkit` and\n * pass the configuration options. If no API key is provided in the options, the\n * environment variable `OPENAI_API_KEY` must be set.\n *\n * Example:\n * ```\n * import openai from 'genkitx-openai';\n *\n * export default configureGenkit({\n * plugins: [\n * openai({ apiKey: 'your-api-key' })\n * ... // other plugins\n * ]\n * });\n * ```\n */\nexport const openAI: Plugin<[PluginOptions] | []> = genkitPlugin(\n 'openai',\n async (options?: PluginOptions) => {\n let apiKey = options?.apiKey || process.env.OPENAI_API_KEY;\n if (!apiKey)\n throw new Error(\n 'please pass in the API key or set the OPENAI_API_KEY environment variable'\n );\n const client = new OpenAI({ apiKey });\n return {\n models: [\n ...Object.keys(SUPPORTED_GPT_MODELS).map((name) =>\n gptModel(name, client)\n ),\n ...Object.keys(SUPPORTED_TTS_MODELS).map((name) =>\n ttsModel(name, client)\n ),\n dallE3Model(client),\n whisper1Model(client),\n ],\n embedders: Object.keys(SUPPORTED_EMBEDDING_MODELS).map((name) =>\n openaiEmbedder(name, options)\n ),\n };\n }\n);\n\nexport default openAI;\n"],"mappings":";;;AAgBA,SAAS,oBAAiC;AAC1C,OAAO,YAAY;AACnB,SAAS,QAAQ,mBAAmB;AACpC;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP,SAAS,sBAAsB,UAAU,MAAM,cAAc;AAC7D,SAAS,UAAU,qBAAqB;AA+DjC,MAAM,SAAuC;AAAA,EAClD;AAAA,EACA,CAAO,YAA4B;AACjC,QAAI,UAAS,mCAAS,WAAU,QAAQ,IAAI;AAC5C,QAAI,CAAC;AACH,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AACF,UAAM,SAAS,IAAI,OAAO,EAAE,OAAO,CAAC;AACpC,WAAO;AAAA,MACL,QAAQ;AAAA,QACN,GAAG,OAAO,KAAK,oBAAoB,EAAE;AAAA,UAAI,CAAC,SACxC,SAAS,MAAM,MAAM;AAAA,QACvB;AAAA,QACA,GAAG,OAAO,KAAK,oBAAoB,EAAE;AAAA,UAAI,CAAC,SACxC,SAAS,MAAM,MAAM;AAAA,QACvB;AAAA,QACA,YAAY,MAAM;AAAA,QAClB,cAAc,MAAM;AAAA,MACtB;AAAA,MACA,WAAW,OAAO,KAAK,0BAA0B,EAAE;AAAA,QAAI,CAAC,SACtD,eAAe,MAAM,OAAO;AAAA,MAC9B;AAAA,IACF;AAAA,EACF;AACF;AAEA,IAAO,cAAQ;","names":[]} \ No newline at end of file diff --git a/plugins/openai/lib/tts.d.mts b/plugins/openai/lib/tts.d.mts new file mode 100644 index 00000000..6d2bae23 --- /dev/null +++ b/plugins/openai/lib/tts.d.mts @@ -0,0 +1,178 @@ +import * as _genkit_ai_ai_model from '@genkit-ai/ai/model'; +import { ModelAction } from '@genkit-ai/ai/model'; +import OpenAI from 'openai'; +import { z } from 'zod'; + +declare const TTSConfigSchema: z.ZodObject; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + voice: z.ZodDefault>>; + speed: z.ZodOptional; + response_format: z.ZodOptional>; 
+}>, "strip", z.ZodTypeAny, { + voice: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer"; + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + speed?: number | undefined; +}, { + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + voice?: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer" | undefined; + speed?: number | undefined; +}>; +declare const tts1: _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + voice: z.ZodDefault>>; + speed: z.ZodOptional; + response_format: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + voice: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer"; + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + speed?: number | undefined; +}, { + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + voice?: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer" | undefined; + speed?: number | undefined; +}>>; +declare const tts1Hd: _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + voice: z.ZodDefault>>; + speed: z.ZodOptional; + response_format: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + voice: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer"; + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + speed?: number | undefined; +}, { + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + voice?: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer" | undefined; + speed?: number | undefined; +}>>; +declare const SUPPORTED_TTS_MODELS: { + 'tts-1': _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; + }, { + voice: z.ZodDefault>>; + speed: z.ZodOptional; + response_format: z.ZodOptional>; + }>, "strip", z.ZodTypeAny, { + voice: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer"; + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + 
version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + speed?: number | undefined; + }, { + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + voice?: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer" | undefined; + speed?: number | undefined; + }>>; + 'tts-1-hd': _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; + }, { + voice: z.ZodDefault>>; + speed: z.ZodOptional; + response_format: z.ZodOptional>; + }>, "strip", z.ZodTypeAny, { + voice: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer"; + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + speed?: number | undefined; + }, { + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + voice?: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer" | undefined; + speed?: number | undefined; + }>>; +}; +declare const RESPONSE_FORMAT_MEDIA_TYPES: { + mp3: string; + opus: string; + aac: string; + flac: string; + wav: string; + pcm: string; +}; +declare function ttsModel(name: string, client: OpenAI): ModelAction; + +export { RESPONSE_FORMAT_MEDIA_TYPES, SUPPORTED_TTS_MODELS, TTSConfigSchema, tts1, tts1Hd, ttsModel }; diff --git a/plugins/openai/lib/tts.d.ts b/plugins/openai/lib/tts.d.ts new file mode 100644 index 00000000..6d2bae23 --- /dev/null +++ b/plugins/openai/lib/tts.d.ts @@ -0,0 +1,178 @@ +import * as _genkit_ai_ai_model from '@genkit-ai/ai/model'; +import { ModelAction } from '@genkit-ai/ai/model'; +import OpenAI from 'openai'; +import { z } from 'zod'; + +declare const TTSConfigSchema: z.ZodObject; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + voice: z.ZodDefault>>; + speed: z.ZodOptional; + response_format: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + voice: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer"; + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + speed?: number | undefined; +}, { + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + voice?: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer" | undefined; + speed?: number | undefined; +}>; +declare const tts1: 
_genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + voice: z.ZodDefault>>; + speed: z.ZodOptional; + response_format: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + voice: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer"; + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + speed?: number | undefined; +}, { + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + voice?: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer" | undefined; + speed?: number | undefined; +}>>; +declare const tts1Hd: _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; +}, { + voice: z.ZodDefault>>; + speed: z.ZodOptional; + response_format: z.ZodOptional>; +}>, "strip", z.ZodTypeAny, { + voice: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer"; + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + speed?: number | undefined; +}, { + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + voice?: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer" | undefined; + speed?: number | undefined; +}>>; +declare const SUPPORTED_TTS_MODELS: { + 'tts-1': _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; + }, { + voice: z.ZodDefault>>; + speed: z.ZodOptional; + response_format: z.ZodOptional>; + }>, "strip", z.ZodTypeAny, { + voice: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer"; + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + speed?: number | undefined; + }, { + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + voice?: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer" | undefined; + speed?: number | undefined; + }>>; + 'tts-1-hd': _genkit_ai_ai_model.ModelReference; + temperature: z.ZodOptional; + maxOutputTokens: z.ZodOptional; + topK: z.ZodOptional; + topP: z.ZodOptional; + stopSequences: z.ZodOptional>; + }, { + 
voice: z.ZodDefault>>; + speed: z.ZodOptional; + response_format: z.ZodOptional>; + }>, "strip", z.ZodTypeAny, { + voice: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer"; + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + speed?: number | undefined; + }, { + response_format?: "mp3" | "opus" | "aac" | "flac" | "wav" | "pcm" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + voice?: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer" | undefined; + speed?: number | undefined; + }>>; +}; +declare const RESPONSE_FORMAT_MEDIA_TYPES: { + mp3: string; + opus: string; + aac: string; + flac: string; + wav: string; + pcm: string; +}; +declare function ttsModel(name: string, client: OpenAI): ModelAction; + +export { RESPONSE_FORMAT_MEDIA_TYPES, SUPPORTED_TTS_MODELS, TTSConfigSchema, tts1, tts1Hd, ttsModel }; diff --git a/plugins/openai/lib/tts.js b/plugins/openai/lib/tts.js new file mode 100644 index 00000000..e94068d0 --- /dev/null +++ b/plugins/openai/lib/tts.js @@ -0,0 +1,182 @@ +"use strict"; +var __defProp = Object.defineProperty; +var __defProps = Object.defineProperties; +var __getOwnPropDesc = Object.getOwnPropertyDescriptor; +var __getOwnPropDescs = Object.getOwnPropertyDescriptors; +var __getOwnPropNames = Object.getOwnPropertyNames; +var __getOwnPropSymbols = Object.getOwnPropertySymbols; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __propIsEnum = Object.prototype.propertyIsEnumerable; +var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value; +var __spreadValues = (a, b) => { + for (var prop in b || (b = {})) + if (__hasOwnProp.call(b, prop)) + __defNormalProp(a, prop, b[prop]); + if (__getOwnPropSymbols) + for (var prop of __getOwnPropSymbols(b)) { + if (__propIsEnum.call(b, prop)) + __defNormalProp(a, prop, b[prop]); + } + return a; +}; +var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b)); +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); +}; +var __copyProps = (to, from, except, desc) => { + if (from && typeof from === "object" || typeof from === "function") { + for (let key of __getOwnPropNames(from)) + if (!__hasOwnProp.call(to, key) && key !== except) + __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); + } + return to; +}; +var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); +var __async = (__this, __arguments, generator) => { + return new Promise((resolve, reject) => { + var fulfilled = (value) => { + try { + step(generator.next(value)); + } catch (e) { + reject(e); + } + }; + var rejected = (value) => { + try { + step(generator.throw(value)); + } catch (e) { + reject(e); + } + }; + var step = (x) => x.done ? 
resolve(x.value) : Promise.resolve(x.value).then(fulfilled, rejected); + step((generator = generator.apply(__this, __arguments)).next()); + }); +}; +var tts_exports = {}; +__export(tts_exports, { + RESPONSE_FORMAT_MEDIA_TYPES: () => RESPONSE_FORMAT_MEDIA_TYPES, + SUPPORTED_TTS_MODELS: () => SUPPORTED_TTS_MODELS, + TTSConfigSchema: () => TTSConfigSchema, + tts1: () => tts1, + tts1Hd: () => tts1Hd, + ttsModel: () => ttsModel +}); +module.exports = __toCommonJS(tts_exports); +var import_ai = require("@genkit-ai/ai"); +var import_model = require("@genkit-ai/ai/model"); +var import_zod = require("zod"); +const TTSConfigSchema = import_model.GenerationCommonConfigSchema.extend({ + voice: import_zod.z.enum(["alloy", "echo", "fable", "onyx", "nova", "shimmer"]).optional().default("alloy"), + speed: import_zod.z.number().min(0.25).max(4).optional(), + response_format: import_zod.z.enum(["mp3", "opus", "aac", "flac", "wav", "pcm"]).optional() +}); +const tts1 = (0, import_model.modelRef)({ + name: "openai/tts-1", + info: { + label: "OpenAI - Text-to-speech 1", + supports: { + media: false, + output: ["media"], + multiturn: false, + systemRole: false, + tools: false + } + }, + configSchema: TTSConfigSchema +}); +const tts1Hd = (0, import_model.modelRef)({ + name: "openai/tts-1-hd", + info: { + label: "OpenAI - Text-to-speech 1 HD", + supports: { + media: false, + output: ["media"], + multiturn: false, + systemRole: false, + tools: false + } + }, + configSchema: TTSConfigSchema +}); +const SUPPORTED_TTS_MODELS = { + "tts-1": tts1, + "tts-1-hd": tts1Hd +}; +const RESPONSE_FORMAT_MEDIA_TYPES = { + mp3: "audio/mpeg", + opus: "audio/opus", + aac: "audio/aac", + flac: "audio/flac", + wav: "audio/wav", + pcm: "audio/L16" +}; +function toTTSRequest(modelName, request) { + var _a, _b, _c, _d, _e; + const mappedModelName = ((_a = request.config) == null ? void 0 : _a.version) || modelName; + const options = { + model: mappedModelName, + input: new import_ai.Message(request.messages[0]).text(), + voice: (_c = (_b = request.config) == null ? void 0 : _b.voice) != null ? _c : "alloy", + speed: (_d = request.config) == null ? void 0 : _d.speed, + response_format: (_e = request.config) == null ? 
void 0 : _e.response_format + }; + for (const k in options) { + if (options[k] === void 0) { + delete options[k]; + } + } + return options; +} +function toGenerateResponse(result, responseFormat = "mp3") { + const mediaType = RESPONSE_FORMAT_MEDIA_TYPES[responseFormat]; + return { + candidates: [ + { + index: 0, + finishReason: "stop", + message: { + role: "model", + content: [ + { + media: { + contentType: mediaType, + url: `data:${mediaType};base64,${result.toString("base64")}` + } + } + ] + } + } + ] + }; +} +function ttsModel(name, client) { + const modelId = `openai/${name}`; + const model = SUPPORTED_TTS_MODELS[name]; + if (!model) + throw new Error(`Unsupported model: ${name}`); + return (0, import_model.defineModel)( + __spreadProps(__spreadValues({ + name: modelId + }, model.info), { + configSchema: model.configSchema + }), + (request) => __async(this, null, function* () { + const ttsRequest = toTTSRequest(name, request); + const result = yield client.audio.speech.create(ttsRequest); + const resultArrayBuffer = yield result.arrayBuffer(); + const resultBuffer = Buffer.from(new Uint8Array(resultArrayBuffer)); + return toGenerateResponse(resultBuffer, ttsRequest.response_format); + }) + ); +} +// Annotate the CommonJS export names for ESM import in node: +0 && (module.exports = { + RESPONSE_FORMAT_MEDIA_TYPES, + SUPPORTED_TTS_MODELS, + TTSConfigSchema, + tts1, + tts1Hd, + ttsModel +}); +//# sourceMappingURL=tts.js.map \ No newline at end of file diff --git a/plugins/openai/lib/tts.js.map b/plugins/openai/lib/tts.js.map new file mode 100644 index 00000000..31019f56 --- /dev/null +++ b/plugins/openai/lib/tts.js.map @@ -0,0 +1 @@ +{"version":3,"sources":["../src/tts.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Message } from '@genkit-ai/ai';\nimport {\n GenerationCommonConfigSchema,\n defineModel,\n modelRef,\n type GenerateRequest,\n type GenerateResponseData,\n type ModelAction,\n} from '@genkit-ai/ai/model';\nimport OpenAI from 'openai';\nimport { type SpeechCreateParams } from 'openai/resources/audio/index.mjs';\nimport { z } from 'zod';\n\nexport const TTSConfigSchema = GenerationCommonConfigSchema.extend({\n voice: z\n .enum(['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer'])\n .optional()\n .default('alloy'),\n speed: z.number().min(0.25).max(4.0).optional(),\n response_format: z\n .enum(['mp3', 'opus', 'aac', 'flac', 'wav', 'pcm'])\n .optional(),\n});\n\nexport const tts1 = modelRef({\n name: 'openai/tts-1',\n info: {\n label: 'OpenAI - Text-to-speech 1',\n supports: {\n media: false,\n output: ['media'],\n multiturn: false,\n systemRole: false,\n tools: false,\n },\n },\n configSchema: TTSConfigSchema,\n});\n\nexport const tts1Hd = modelRef({\n name: 'openai/tts-1-hd',\n info: {\n label: 'OpenAI - Text-to-speech 1 HD',\n supports: {\n media: false,\n output: ['media'],\n multiturn: false,\n systemRole: false,\n tools: false,\n },\n },\n configSchema: 
TTSConfigSchema,\n});\n\nexport const SUPPORTED_TTS_MODELS = {\n 'tts-1': tts1,\n 'tts-1-hd': tts1Hd,\n};\n\nexport const RESPONSE_FORMAT_MEDIA_TYPES = {\n mp3: 'audio/mpeg',\n opus: 'audio/opus',\n aac: 'audio/aac',\n flac: 'audio/flac',\n wav: 'audio/wav',\n pcm: 'audio/L16',\n};\n\nfunction toTTSRequest(\n modelName: string,\n request: GenerateRequest\n): SpeechCreateParams {\n const mappedModelName = request.config?.version || modelName;\n const options: SpeechCreateParams = {\n model: mappedModelName,\n input: new Message(request.messages[0]).text(),\n voice: request.config?.voice ?? 'alloy',\n speed: request.config?.speed,\n response_format: request.config?.response_format,\n };\n for (const k in options) {\n if (options[k] === undefined) {\n delete options[k];\n }\n }\n return options;\n}\n\nfunction toGenerateResponse(\n result: Buffer,\n responseFormat: z.infer['response_format'] = 'mp3'\n): GenerateResponseData {\n const mediaType = RESPONSE_FORMAT_MEDIA_TYPES[responseFormat];\n return {\n candidates: [\n {\n index: 0,\n finishReason: 'stop',\n message: {\n role: 'model',\n content: [\n {\n media: {\n contentType: mediaType,\n url: `data:${mediaType};base64,${result.toString('base64')}`,\n },\n },\n ],\n },\n },\n ],\n };\n}\n\nexport function ttsModel(\n name: string,\n client: OpenAI\n): ModelAction {\n const modelId = `openai/${name}`;\n const model = SUPPORTED_TTS_MODELS[name];\n if (!model) throw new Error(`Unsupported model: ${name}`);\n\n return defineModel(\n {\n name: modelId,\n ...model.info,\n configSchema: model.configSchema,\n },\n async (request) => {\n const ttsRequest = toTTSRequest(name, request);\n const result = await client.audio.speech.create(ttsRequest);\n const resultArrayBuffer = await result.arrayBuffer();\n const resultBuffer = Buffer.from(new Uint8Array(resultArrayBuffer));\n return toGenerateResponse(resultBuffer, ttsRequest.response_format);\n }\n 
);\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAgBA,gBAAwB;AACxB,mBAOO;AAGP,iBAAkB;AAEX,MAAM,kBAAkB,0CAA6B,OAAO;AAAA,EACjE,OAAO,aACJ,KAAK,CAAC,SAAS,QAAQ,SAAS,QAAQ,QAAQ,SAAS,CAAC,EAC1D,SAAS,EACT,QAAQ,OAAO;AAAA,EAClB,OAAO,aAAE,OAAO,EAAE,IAAI,IAAI,EAAE,IAAI,CAAG,EAAE,SAAS;AAAA,EAC9C,iBAAiB,aACd,KAAK,CAAC,OAAO,QAAQ,OAAO,QAAQ,OAAO,KAAK,CAAC,EACjD,SAAS;AACd,CAAC;AAEM,MAAM,WAAO,uBAAS;AAAA,EAC3B,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,OAAO;AAAA,IACP,UAAU;AAAA,MACR,OAAO;AAAA,MACP,QAAQ,CAAC,OAAO;AAAA,MAChB,WAAW;AAAA,MACX,YAAY;AAAA,MACZ,OAAO;AAAA,IACT;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAEM,MAAM,aAAS,uBAAS;AAAA,EAC7B,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,OAAO;AAAA,IACP,UAAU;AAAA,MACR,OAAO;AAAA,MACP,QAAQ,CAAC,OAAO;AAAA,MAChB,WAAW;AAAA,MACX,YAAY;AAAA,MACZ,OAAO;AAAA,IACT;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAEM,MAAM,uBAAuB;AAAA,EAClC,SAAS;AAAA,EACT,YAAY;AACd;AAEO,MAAM,8BAA8B;AAAA,EACzC,KAAK;AAAA,EACL,MAAM;AAAA,EACN,KAAK;AAAA,EACL,MAAM;AAAA,EACN,KAAK;AAAA,EACL,KAAK;AACP;AAEA,SAAS,aACP,WACA,SACoB;AAvFtB;AAwFE,QAAM,oBAAkB,aAAQ,WAAR,mBAAgB,YAAW;AACnD,QAAM,UAA8B;AAAA,IAClC,OAAO;AAAA,IACP,OAAO,IAAI,kBAAQ,QAAQ,SAAS,CAAC,CAAC,EAAE,KAAK;AAAA,IAC7C,QAAO,mBAAQ,WAAR,mBAAgB,UAAhB,YAAyB;AAAA,IAChC,QAAO,aAAQ,WAAR,mBAAgB;AAAA,IACvB,kBAAiB,aAAQ,WAAR,mBAAgB;AAAA,EACnC;AACA,aAAW,KAAK,SAAS;AACvB,QAAI,QAAQ,CAAC,MAAM,QAAW;AAC5B,aAAO,QAAQ,CAAC;AAAA,IAClB;AAAA,EACF;AACA,SAAO;AACT;AAEA,SAAS,mBACP,QACA,iBAAqE,OAC/C;AACtB,QAAM,YAAY,4BAA4B,cAAc;AAC5D,SAAO;AAAA,IACL,YAAY;AAAA,MACV;AAAA,QACE,OAAO;AAAA,QACP,cAAc;AAAA,QACd,SAAS;AAAA,UACP,MAAM;AAAA,UACN,SAAS;AAAA,YACP;AAAA,cACE,OAAO;AAAA,gBACL,aAAa;AAAA,gBACb,KAAK,QAAQ,SAAS,WAAW,OAAO,SAAS,QAAQ,CAAC;AAAA,cAC5D;AAAA,YACF;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;AAEO,SAAS,SACd,MACA,QACqC;AACrC,QAAM,UAAU,UAAU,IAAI;AAC9B,QAAM,QAAQ,qBAAqB,IAAI;AACvC,MAAI,CAAC;AAAO,UAAM,IAAI,MAAM,sBAAsB,IAAI,EAAE;AAExD,aAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,OACH,MAAM,OAFX;AAAA,MAGE,cAAc,MAAM;AAAA,IACtB;AAAA,IACA,CAAO,YAAY;AACjB,YAAM,aAAa,aAAa,MAAM,OAAO;AAC7C,YAAM,SAAS,MAAM,OAAO,MAAM,OAAO,OAAO,UAAU;AAC1D,YAAM,oBAAoB,MAAM,OAAO,YAAY;AACnD,YAAM,eAAe,OAAO,KAAK,IAAI,WAAW,iBAAiB,CAAC;AAClE,aAAO,mBAAmB,cAAc,WAAW,eAAe;AAAA,IACpE;AAAA,EACF;AACF;","names":[]} \ No newline at end of file diff --git a/plugins/openai/lib/tts.mjs b/plugins/openai/lib/tts.mjs new file mode 100644 index 00000000..51aaac94 --- /dev/null +++ b/plugins/openai/lib/tts.mjs @@ -0,0 +1,125 @@ +import { + __async, + __spreadProps, + __spreadValues +} from "./chunk-WFI2LP4G.mjs"; +import { Message } from "@genkit-ai/ai"; +import { + GenerationCommonConfigSchema, + defineModel, + modelRef +} from "@genkit-ai/ai/model"; +import { z } from "zod"; +const TTSConfigSchema = GenerationCommonConfigSchema.extend({ + voice: z.enum(["alloy", "echo", "fable", "onyx", "nova", "shimmer"]).optional().default("alloy"), + speed: z.number().min(0.25).max(4).optional(), + response_format: z.enum(["mp3", "opus", "aac", "flac", "wav", "pcm"]).optional() +}); +const tts1 = modelRef({ + name: "openai/tts-1", + info: { + label: "OpenAI - Text-to-speech 1", + supports: { + media: false, + output: ["media"], + multiturn: false, + systemRole: false, + tools: false + } + }, + configSchema: TTSConfigSchema +}); +const tts1Hd = modelRef({ + name: "openai/tts-1-hd", + info: { + label: "OpenAI - Text-to-speech 1 HD", + supports: { + media: false, + output: ["media"], + multiturn: false, + systemRole: false, + tools: false + } + }, + configSchema: TTSConfigSchema +}); +const SUPPORTED_TTS_MODELS = { + "tts-1": 
tts1, + "tts-1-hd": tts1Hd +}; +const RESPONSE_FORMAT_MEDIA_TYPES = { + mp3: "audio/mpeg", + opus: "audio/opus", + aac: "audio/aac", + flac: "audio/flac", + wav: "audio/wav", + pcm: "audio/L16" +}; +function toTTSRequest(modelName, request) { + var _a, _b, _c, _d, _e; + const mappedModelName = ((_a = request.config) == null ? void 0 : _a.version) || modelName; + const options = { + model: mappedModelName, + input: new Message(request.messages[0]).text(), + voice: (_c = (_b = request.config) == null ? void 0 : _b.voice) != null ? _c : "alloy", + speed: (_d = request.config) == null ? void 0 : _d.speed, + response_format: (_e = request.config) == null ? void 0 : _e.response_format + }; + for (const k in options) { + if (options[k] === void 0) { + delete options[k]; + } + } + return options; +} +function toGenerateResponse(result, responseFormat = "mp3") { + const mediaType = RESPONSE_FORMAT_MEDIA_TYPES[responseFormat]; + return { + candidates: [ + { + index: 0, + finishReason: "stop", + message: { + role: "model", + content: [ + { + media: { + contentType: mediaType, + url: `data:${mediaType};base64,${result.toString("base64")}` + } + } + ] + } + } + ] + }; +} +function ttsModel(name, client) { + const modelId = `openai/${name}`; + const model = SUPPORTED_TTS_MODELS[name]; + if (!model) + throw new Error(`Unsupported model: ${name}`); + return defineModel( + __spreadProps(__spreadValues({ + name: modelId + }, model.info), { + configSchema: model.configSchema + }), + (request) => __async(this, null, function* () { + const ttsRequest = toTTSRequest(name, request); + const result = yield client.audio.speech.create(ttsRequest); + const resultArrayBuffer = yield result.arrayBuffer(); + const resultBuffer = Buffer.from(new Uint8Array(resultArrayBuffer)); + return toGenerateResponse(resultBuffer, ttsRequest.response_format); + }) + ); +} +export { + RESPONSE_FORMAT_MEDIA_TYPES, + SUPPORTED_TTS_MODELS, + TTSConfigSchema, + tts1, + tts1Hd, + ttsModel +}; +//# sourceMappingURL=tts.mjs.map \ No newline at end of file diff --git a/plugins/openai/lib/tts.mjs.map b/plugins/openai/lib/tts.mjs.map new file mode 100644 index 00000000..bc36f397 --- /dev/null +++ b/plugins/openai/lib/tts.mjs.map @@ -0,0 +1 @@ +{"version":3,"sources":["../src/tts.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Message } from '@genkit-ai/ai';\nimport {\n GenerationCommonConfigSchema,\n defineModel,\n modelRef,\n type GenerateRequest,\n type GenerateResponseData,\n type ModelAction,\n} from '@genkit-ai/ai/model';\nimport OpenAI from 'openai';\nimport { type SpeechCreateParams } from 'openai/resources/audio/index.mjs';\nimport { z } from 'zod';\n\nexport const TTSConfigSchema = GenerationCommonConfigSchema.extend({\n voice: z\n .enum(['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer'])\n .optional()\n .default('alloy'),\n speed: z.number().min(0.25).max(4.0).optional(),\n response_format: z\n .enum(['mp3', 'opus', 'aac', 'flac', 'wav', 
'pcm'])\n .optional(),\n});\n\nexport const tts1 = modelRef({\n name: 'openai/tts-1',\n info: {\n label: 'OpenAI - Text-to-speech 1',\n supports: {\n media: false,\n output: ['media'],\n multiturn: false,\n systemRole: false,\n tools: false,\n },\n },\n configSchema: TTSConfigSchema,\n});\n\nexport const tts1Hd = modelRef({\n name: 'openai/tts-1-hd',\n info: {\n label: 'OpenAI - Text-to-speech 1 HD',\n supports: {\n media: false,\n output: ['media'],\n multiturn: false,\n systemRole: false,\n tools: false,\n },\n },\n configSchema: TTSConfigSchema,\n});\n\nexport const SUPPORTED_TTS_MODELS = {\n 'tts-1': tts1,\n 'tts-1-hd': tts1Hd,\n};\n\nexport const RESPONSE_FORMAT_MEDIA_TYPES = {\n mp3: 'audio/mpeg',\n opus: 'audio/opus',\n aac: 'audio/aac',\n flac: 'audio/flac',\n wav: 'audio/wav',\n pcm: 'audio/L16',\n};\n\nfunction toTTSRequest(\n modelName: string,\n request: GenerateRequest<typeof TTSConfigSchema>\n): SpeechCreateParams {\n const mappedModelName = request.config?.version || modelName;\n const options: SpeechCreateParams = {\n model: mappedModelName,\n input: new Message(request.messages[0]).text(),\n voice: request.config?.voice ?? 'alloy',\n speed: request.config?.speed,\n response_format: request.config?.response_format,\n };\n for (const k in options) {\n if (options[k] === undefined) {\n delete options[k];\n }\n }\n return options;\n}\n\nfunction toGenerateResponse(\n result: Buffer,\n responseFormat: z.infer<typeof TTSConfigSchema>['response_format'] = 'mp3'\n): GenerateResponseData {\n const mediaType = RESPONSE_FORMAT_MEDIA_TYPES[responseFormat];\n return {\n candidates: [\n {\n index: 0,\n finishReason: 'stop',\n message: {\n role: 'model',\n content: [\n {\n media: {\n contentType: mediaType,\n url: `data:${mediaType};base64,${result.toString('base64')}`,\n },\n },\n ],\n },\n },\n ],\n };\n}\n\nexport function ttsModel(\n name: string,\n client: OpenAI\n): ModelAction<typeof TTSConfigSchema> {\n const modelId = `openai/${name}`;\n const model = SUPPORTED_TTS_MODELS[name];\n if (!model) throw new Error(`Unsupported model: ${name}`);\n\n return defineModel(\n {\n name: modelId,\n ...model.info,\n configSchema: model.configSchema,\n },\n async (request) => {\n const ttsRequest = toTTSRequest(name, request);\n const result = await client.audio.speech.create(ttsRequest);\n const resultArrayBuffer = await result.arrayBuffer();\n const resultBuffer = Buffer.from(new Uint8Array(resultArrayBuffer));\n return toGenerateResponse(resultBuffer, ttsRequest.response_format);\n }\n
);\n}\n"],"mappings":";;;;;AAgBA,SAAS,eAAe;AACxB;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,OAIK;AAGP,SAAS,SAAS;AAEX,MAAM,kBAAkB,6BAA6B,OAAO;AAAA,EACjE,OAAO,EACJ,KAAK,CAAC,SAAS,QAAQ,SAAS,QAAQ,QAAQ,SAAS,CAAC,EAC1D,SAAS,EACT,QAAQ,OAAO;AAAA,EAClB,OAAO,EAAE,OAAO,EAAE,IAAI,IAAI,EAAE,IAAI,CAAG,EAAE,SAAS;AAAA,EAC9C,iBAAiB,EACd,KAAK,CAAC,OAAO,QAAQ,OAAO,QAAQ,OAAO,KAAK,CAAC,EACjD,SAAS;AACd,CAAC;AAEM,MAAM,OAAO,SAAS;AAAA,EAC3B,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,OAAO;AAAA,IACP,UAAU;AAAA,MACR,OAAO;AAAA,MACP,QAAQ,CAAC,OAAO;AAAA,MAChB,WAAW;AAAA,MACX,YAAY;AAAA,MACZ,OAAO;AAAA,IACT;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAEM,MAAM,SAAS,SAAS;AAAA,EAC7B,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,OAAO;AAAA,IACP,UAAU;AAAA,MACR,OAAO;AAAA,MACP,QAAQ,CAAC,OAAO;AAAA,MAChB,WAAW;AAAA,MACX,YAAY;AAAA,MACZ,OAAO;AAAA,IACT;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAEM,MAAM,uBAAuB;AAAA,EAClC,SAAS;AAAA,EACT,YAAY;AACd;AAEO,MAAM,8BAA8B;AAAA,EACzC,KAAK;AAAA,EACL,MAAM;AAAA,EACN,KAAK;AAAA,EACL,MAAM;AAAA,EACN,KAAK;AAAA,EACL,KAAK;AACP;AAEA,SAAS,aACP,WACA,SACoB;AAvFtB;AAwFE,QAAM,oBAAkB,aAAQ,WAAR,mBAAgB,YAAW;AACnD,QAAM,UAA8B;AAAA,IAClC,OAAO;AAAA,IACP,OAAO,IAAI,QAAQ,QAAQ,SAAS,CAAC,CAAC,EAAE,KAAK;AAAA,IAC7C,QAAO,mBAAQ,WAAR,mBAAgB,UAAhB,YAAyB;AAAA,IAChC,QAAO,aAAQ,WAAR,mBAAgB;AAAA,IACvB,kBAAiB,aAAQ,WAAR,mBAAgB;AAAA,EACnC;AACA,aAAW,KAAK,SAAS;AACvB,QAAI,QAAQ,CAAC,MAAM,QAAW;AAC5B,aAAO,QAAQ,CAAC;AAAA,IAClB;AAAA,EACF;AACA,SAAO;AACT;AAEA,SAAS,mBACP,QACA,iBAAqE,OAC/C;AACtB,QAAM,YAAY,4BAA4B,cAAc;AAC5D,SAAO;AAAA,IACL,YAAY;AAAA,MACV;AAAA,QACE,OAAO;AAAA,QACP,cAAc;AAAA,QACd,SAAS;AAAA,UACP,MAAM;AAAA,UACN,SAAS;AAAA,YACP;AAAA,cACE,OAAO;AAAA,gBACL,aAAa;AAAA,gBACb,KAAK,QAAQ,SAAS,WAAW,OAAO,SAAS,QAAQ,CAAC;AAAA,cAC5D;AAAA,YACF;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;AAEO,SAAS,SACd,MACA,QACqC;AACrC,QAAM,UAAU,UAAU,IAAI;AAC9B,QAAM,QAAQ,qBAAqB,IAAI;AACvC,MAAI,CAAC;AAAO,UAAM,IAAI,MAAM,sBAAsB,IAAI,EAAE;AAExD,SAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,OACH,MAAM,OAFX;AAAA,MAGE,cAAc,MAAM;AAAA,IACtB;AAAA,IACA,CAAO,YAAY;AACjB,YAAM,aAAa,aAAa,MAAM,OAAO;AAC7C,YAAM,SAAS,MAAM,OAAO,MAAM,OAAO,OAAO,UAAU;AAC1D,YAAM,oBAAoB,MAAM,OAAO,YAAY;AACnD,YAAM,eAAe,OAAO,KAAK,IAAI,WAAW,iBAAiB,CAAC;AAClE,aAAO,mBAAmB,cAAc,WAAW,eAAe;AAAA,IACpE;AAAA,EACF;AACF;","names":[]} \ No newline at end of file diff --git a/plugins/openai/lib/whisper.d.mts b/plugins/openai/lib/whisper.d.mts new file mode 100644 index 00000000..e5856a0c --- /dev/null +++ b/plugins/openai/lib/whisper.d.mts @@ -0,0 +1,72 @@ +import * as _genkit_ai_ai_model from '@genkit-ai/ai/model'; +import { ModelAction } from '@genkit-ai/ai/model'; +import OpenAI from 'openai'; +import { z } from 'zod'; + +declare const Whisper1ConfigSchema: z.ZodObject<z.objectUtil.extendShape<{ + version: z.ZodOptional<z.ZodString>; + temperature: z.ZodOptional<z.ZodNumber>; + maxOutputTokens: z.ZodOptional<z.ZodNumber>; + topK: z.ZodOptional<z.ZodNumber>; + topP: z.ZodOptional<z.ZodNumber>; + stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>; +}, { + language: z.ZodOptional<z.ZodString>; + timestamp_granularities: z.ZodOptional<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>; + response_format: z.ZodOptional<z.ZodEnum<["json", "text", "srt", "verbose_json", "vtt"]>>; +}>, "strip", z.ZodTypeAny, { + response_format?: "text" | "json" | "srt" | "verbose_json" | "vtt" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + language?: string | undefined; + timestamp_granularities?: ("word" | "segment")[] | undefined; +}, { + response_format?: "text" | "json" | "srt" | "verbose_json" | "vtt" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + language?: string | undefined; + timestamp_granularities?: ("word" | "segment")[] | undefined; +}>; +declare const whisper1: _genkit_ai_ai_model.ModelReference<z.ZodObject<z.objectUtil.extendShape<{ + version: z.ZodOptional<z.ZodString>; + temperature: z.ZodOptional<z.ZodNumber>; + maxOutputTokens: z.ZodOptional<z.ZodNumber>; + topK: z.ZodOptional<z.ZodNumber>; + topP: z.ZodOptional<z.ZodNumber>; + stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>; +}, { + language: z.ZodOptional<z.ZodString>; + timestamp_granularities: z.ZodOptional<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>; + response_format: z.ZodOptional<z.ZodEnum<["json", "text", "srt", "verbose_json", "vtt"]>>; +}>, "strip", z.ZodTypeAny, { + response_format?: "text" | "json" | "srt" | "verbose_json" | "vtt" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + language?: string | undefined; + timestamp_granularities?: ("word" | "segment")[] | undefined; +}, { + response_format?: "text" | "json" | "srt" | "verbose_json" | "vtt" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + language?: string | undefined; + timestamp_granularities?: ("word" | "segment")[] | undefined; +}>>; +declare function whisper1Model(client: OpenAI): ModelAction<typeof Whisper1ConfigSchema>; + +export { Whisper1ConfigSchema, whisper1, whisper1Model }; diff --git a/plugins/openai/lib/whisper.d.ts b/plugins/openai/lib/whisper.d.ts new file mode 100644 index 00000000..e5856a0c --- /dev/null +++ b/plugins/openai/lib/whisper.d.ts @@ -0,0 +1,72 @@ +import * as _genkit_ai_ai_model from '@genkit-ai/ai/model'; +import { ModelAction } from '@genkit-ai/ai/model'; +import OpenAI from 'openai'; +import { z } from 'zod'; + +declare const Whisper1ConfigSchema: z.ZodObject<z.objectUtil.extendShape<{ + version: z.ZodOptional<z.ZodString>; + temperature: z.ZodOptional<z.ZodNumber>; + maxOutputTokens: z.ZodOptional<z.ZodNumber>; + topK: z.ZodOptional<z.ZodNumber>; + topP: z.ZodOptional<z.ZodNumber>; + stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>; +}, { + language: z.ZodOptional<z.ZodString>; + timestamp_granularities: z.ZodOptional<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>; + response_format: z.ZodOptional<z.ZodEnum<["json", "text", "srt", "verbose_json", "vtt"]>>; +}>, "strip", z.ZodTypeAny, { + response_format?: "text" | "json" | "srt" | "verbose_json" | "vtt" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + language?: string | undefined; + timestamp_granularities?: ("word" | "segment")[] | undefined; +}, { + response_format?: "text" | "json" | "srt" | "verbose_json" | "vtt" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + language?: string | undefined; + timestamp_granularities?: ("word" | "segment")[] | undefined; +}>; +declare const whisper1: _genkit_ai_ai_model.ModelReference<z.ZodObject<z.objectUtil.extendShape<{ + version: z.ZodOptional<z.ZodString>; + temperature: z.ZodOptional<z.ZodNumber>; + maxOutputTokens: z.ZodOptional<z.ZodNumber>; + topK: z.ZodOptional<z.ZodNumber>; + topP: z.ZodOptional<z.ZodNumber>; + stopSequences: z.ZodOptional<z.ZodArray<z.ZodString, "many">>; +}, { + language: z.ZodOptional<z.ZodString>; + timestamp_granularities: z.ZodOptional<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>; + response_format: z.ZodOptional<z.ZodEnum<["json", "text", "srt", "verbose_json", "vtt"]>>; +}>, "strip", z.ZodTypeAny, { + response_format?: "text" | "json" | "srt" | "verbose_json" | "vtt" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + language?: string | undefined; + timestamp_granularities?: ("word" | "segment")[] | undefined; +}, { + response_format?: "text" | "json" | "srt" | "verbose_json" | "vtt" | undefined; + version?: string | undefined; + temperature?: number | undefined; + maxOutputTokens?: number | undefined; + topK?: number | undefined; + topP?: number | undefined; + stopSequences?: string[] | undefined; + language?: string | undefined; + timestamp_granularities?: ("word" | "segment")[] | undefined; +}>>; +declare function whisper1Model(client: OpenAI): ModelAction<typeof Whisper1ConfigSchema>; + +export { Whisper1ConfigSchema, whisper1, whisper1Model }; diff --git a/plugins/openai/lib/whisper.js b/plugins/openai/lib/whisper.js new file mode 100644 index 00000000..671a9a7c --- /dev/null +++ b/plugins/openai/lib/whisper.js @@ -0,0 +1,166 @@ +"use strict"; +var __defProp = Object.defineProperty; +var __defProps = Object.defineProperties; +var __getOwnPropDesc = Object.getOwnPropertyDescriptor; +var __getOwnPropDescs = Object.getOwnPropertyDescriptors; +var __getOwnPropNames = Object.getOwnPropertyNames; +var __getOwnPropSymbols = Object.getOwnPropertySymbols; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __propIsEnum = Object.prototype.propertyIsEnumerable; +var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value; +var __spreadValues = (a, b) => { + for (var prop in b || (b = {})) + if (__hasOwnProp.call(b, prop)) + __defNormalProp(a, prop, b[prop]); + if (__getOwnPropSymbols) + for (var prop of __getOwnPropSymbols(b)) { + if (__propIsEnum.call(b, prop)) + __defNormalProp(a, prop, b[prop]); + } + return a; +}; +var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b)); +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); +}; +var __copyProps = (to, from, except, desc) => { + if (from && typeof from === "object" || typeof from === "function") { + for (let key of __getOwnPropNames(from)) + if (!__hasOwnProp.call(to, key) && key !== except) + __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); + } + return to; +}; +var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); +var __async = (__this, __arguments, generator) => { + return new Promise((resolve, reject) => { + var fulfilled = (value) => { + try { + step(generator.next(value)); + } catch (e) { + reject(e); + } + }; + var rejected = (value) => { + try { + step(generator.throw(value)); + } catch (e) { + reject(e); + } + }; + var step = (x) => x.done ?
resolve(x.value) : Promise.resolve(x.value).then(fulfilled, rejected); + step((generator = generator.apply(__this, __arguments)).next()); + }); +}; +var whisper_exports = {}; +__export(whisper_exports, { + Whisper1ConfigSchema: () => Whisper1ConfigSchema, + whisper1: () => whisper1, + whisper1Model: () => whisper1Model +}); +module.exports = __toCommonJS(whisper_exports); +var import_ai = require("@genkit-ai/ai"); +var import_model = require("@genkit-ai/ai/model"); +var import_zod = require("zod"); +const Whisper1ConfigSchema = import_model.GenerationCommonConfigSchema.extend({ + language: import_zod.z.string().optional(), + timestamp_granularities: import_zod.z.array(import_zod.z.enum(["word", "segment"])).optional(), + response_format: import_zod.z.enum(["json", "text", "srt", "verbose_json", "vtt"]).optional() +}); +const whisper1 = (0, import_model.modelRef)({ + name: "openai/whisper-1", + info: { + label: "OpenAI - Whisper", + supports: { + media: true, + output: ["text", "json"], + multiturn: false, + systemRole: false, + tools: false + } + }, + configSchema: Whisper1ConfigSchema +}); +function toWhisper1Request(request) { + var _a, _b, _c, _d, _e, _f; + const message = new import_ai.Message(request.messages[0]); + const media = message.media(); + if (!(media == null ? void 0 : media.url)) { + throw new Error("No media found in the request"); + } + const mediaBuffer = Buffer.from( + media.url.slice(media.url.indexOf(",") + 1), + "base64" + ); + const mediaFile = new File([mediaBuffer], "input", { + type: (_a = media.contentType) != null ? _a : media.url.slice("data:".length, media.url.indexOf(";")) + }); + const options = { + model: "whisper-1", + file: mediaFile, + prompt: message.text(), + temperature: (_b = request.config) == null ? void 0 : _b.temperature, + language: (_c = request.config) == null ? void 0 : _c.language, + timestamp_granularities: (_d = request.config) == null ? void 0 : _d.timestamp_granularities + }; + const outputFormat = (_e = request.output) == null ? void 0 : _e.format; + const customFormat = (_f = request.config) == null ? void 0 : _f.response_format; + if (outputFormat && customFormat) { + if (outputFormat === "json" && customFormat !== "json" && customFormat !== "verbose_json") { + throw new Error( + `Custom response format ${customFormat} is not compatible with output format ${outputFormat}` + ); + } + } + if (outputFormat === "media") { + throw new Error(`Output format ${outputFormat} is not supported.`); + } + options.response_format = customFormat || outputFormat || "text"; + for (const k in options) { + if (options[k] === void 0) { + delete options[k]; + } + } + return options; +} +function toGenerateResponse(result) { + return { + candidates: [ + { + index: 0, + finishReason: "stop", + message: { + role: "model", + content: [ + { + text: typeof result === "string" ? 
result : result.text + } + ] + } + } + ] + }; +} +function whisper1Model(client) { + return (0, import_model.defineModel)( + __spreadProps(__spreadValues({ + name: whisper1.name + }, whisper1.info), { + configSchema: whisper1.configSchema + }), + (request) => __async(this, null, function* () { + const result = yield client.audio.transcriptions.create( + toWhisper1Request(request) + ); + return toGenerateResponse(result); + }) + ); +} +// Annotate the CommonJS export names for ESM import in node: +0 && (module.exports = { + Whisper1ConfigSchema, + whisper1, + whisper1Model +}); +//# sourceMappingURL=whisper.js.map \ No newline at end of file diff --git a/plugins/openai/lib/whisper.js.map b/plugins/openai/lib/whisper.js.map new file mode 100644 index 00000000..62794cbd --- /dev/null +++ b/plugins/openai/lib/whisper.js.map @@ -0,0 +1 @@ +{"version":3,"sources":["../src/whisper.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Message } from '@genkit-ai/ai';\nimport {\n GenerationCommonConfigSchema,\n defineModel,\n modelRef,\n type GenerateRequest,\n type GenerateResponseData,\n type ModelAction,\n} from '@genkit-ai/ai/model';\nimport OpenAI from 'openai';\nimport {\n type TranscriptionCreateParams,\n type Transcription,\n} from 'openai/resources/audio/index.mjs';\nimport { z } from 'zod';\n\nexport const Whisper1ConfigSchema = GenerationCommonConfigSchema.extend({\n language: z.string().optional(),\n timestamp_granularities: z.array(z.enum(['word', 'segment'])).optional(),\n response_format: z\n .enum(['json', 'text', 'srt', 'verbose_json', 'vtt'])\n .optional(),\n});\n\nexport const whisper1 = modelRef({\n name: 'openai/whisper-1',\n info: {\n label: 'OpenAI - Whisper',\n supports: {\n media: true,\n output: ['text', 'json'],\n multiturn: false,\n systemRole: false,\n tools: false,\n },\n },\n configSchema: Whisper1ConfigSchema,\n});\n\nfunction toWhisper1Request(\n request: GenerateRequest<typeof Whisper1ConfigSchema>\n): TranscriptionCreateParams {\n const message = new Message(request.messages[0]);\n const media = message.media();\n if (!media?.url) {\n throw new Error('No media found in the request');\n }\n const mediaBuffer = Buffer.from(\n media.url.slice(media.url.indexOf(',') + 1),\n 'base64'\n );\n const mediaFile = new File([mediaBuffer], 'input', {\n type:\n media.contentType ??\n media.url.slice('data:'.length, media.url.indexOf(';')),\n });\n const options: TranscriptionCreateParams = {\n model: 'whisper-1',\n file: mediaFile,\n prompt: message.text(),\n temperature: request.config?.temperature,\n language: request.config?.language,\n timestamp_granularities: request.config?.timestamp_granularities,\n };\n const outputFormat = request.output?.format;\n const customFormat = request.config?.response_format;\n if (outputFormat && customFormat) {\n if (\n outputFormat === 'json' &&\n customFormat !== 'json' &&\n customFormat !== 'verbose_json'\n ) {\n throw new Error(\n `Custom response format ${customFormat} is not compatible with output format ${outputFormat}`\n );\n }\n }\n if (outputFormat === 'media') {\n throw new Error(`Output format ${outputFormat} is not supported.`);\n }\n options.response_format = customFormat || outputFormat || 'text';\n for (const k in options) {\n if (options[k] === undefined) {\n delete options[k];\n }\n }\n return options;\n}\n\nfunction toGenerateResponse(\n result: Transcription | string\n): GenerateResponseData {\n return {\n candidates: [\n {\n index: 0,\n finishReason: 'stop',\n message: {\n role: 'model',\n content: [\n {\n text: typeof result === 'string' ? result : result.text,\n },\n ],\n },\n },\n ],\n };\n}\n\nexport function whisper1Model(\n client: OpenAI\n): ModelAction<typeof Whisper1ConfigSchema> {\n return defineModel(\n {\n name: whisper1.name,\n ...whisper1.info,\n configSchema: whisper1.configSchema,\n },\n async (request) => {\n const result = await client.audio.transcriptions.create(\n toWhisper1Request(request)\n );\n return toGenerateResponse(result);\n }\n );\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAgBA,gBAAwB;AACxB,mBAOO;AAMP,iBAAkB;AAEX,MAAM,uBAAuB,0CAA6B,OAAO;AAAA,EACtE,UAAU,aAAE,OAAO,EAAE,SAAS;AAAA,EAC9B,yBAAyB,aAAE,MAAM,aAAE,KAAK,CAAC,QAAQ,SAAS,CAAC,CAAC,EAAE,SAAS;AAAA,EACvE,iBAAiB,aACd,KAAK,CAAC,QAAQ,QAAQ,OAAO,gBAAgB,KAAK,CAAC,EACnD,SAAS;AACd,CAAC;AAEM,MAAM,eAAW,uBAAS;AAAA,EAC/B,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,OAAO;AAAA,IACP,UAAU;AAAA,MACR,OAAO;AAAA,MACP,QAAQ,CAAC,QAAQ,MAAM;AAAA,MACvB,WAAW;AAAA,MACX,YAAY;AAAA,MACZ,OAAO;AAAA,IACT;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAED,SAAS,kBACP,SAC2B;AAzD7B;AA0DE,QAAM,UAAU,IAAI,kBAAQ,QAAQ,SAAS,CAAC,CAAC;AAC/C,QAAM,QAAQ,QAAQ,MAAM;AAC5B,MAAI,EAAC,+BAAO,MAAK;AACf,UAAM,IAAI,MAAM,+BAA+B;AAAA,EACjD;AACA,QAAM,cAAc,OAAO;AAAA,IACzB,MAAM,IAAI,MAAM,MAAM,IAAI,QAAQ,GAAG,IAAI,CAAC;AAAA,IAC1C;AAAA,EACF;AACA,QAAM,YAAY,IAAI,KAAK,CAAC,WAAW,GAAG,SAAS;AAAA,IACjD,OACE,WAAM,gBAAN,YACA,MAAM,IAAI,MAAM,QAAQ,QAAQ,MAAM,IAAI,QAAQ,GAAG,CAAC;AAAA,EAC1D,CAAC;AACD,QAAM,UAAqC;AAAA,IACzC,OAAO;AAAA,IACP,MAAM;AAAA,IACN,QAAQ,QAAQ,KAAK;AAAA,IACrB,cAAa,aAAQ,WAAR,mBAAgB;AAAA,IAC7B,WAAU,aAAQ,WAAR,mBAAgB;AAAA,IAC1B,0BAAyB,aAAQ,WAAR,mBAAgB;AAAA,EAC3C;AACA,QAAM,gBAAe,aAAQ,WAAR,mBAAgB;AACrC,QAAM,gBAAe,aAAQ,WAAR,mBAAgB;AACrC,MAAI,gBAAgB,cAAc;AAChC,QACE,iBAAiB,UACjB,iBAAiB,UACjB,iBAAiB,gBACjB;AACA,YAAM,IAAI;AAAA,QACR,0BAA0B,YAAY,yCAAyC,YAAY;AAAA,MAC7F;AAAA,IACF;AAAA,EACF;AACA,MAAI,iBAAiB,SAAS;AAC5B,UAAM,IAAI,MAAM,iBAAiB,YAAY,oBAAoB;AAAA,EACnE;AACA,UAAQ,kBAAkB,gBAAgB,gBAAgB;AAC1D,aAAW,KAAK,SAAS;AACvB,QAAI,QAAQ,CAAC,MAAM,QAAW;AAC5B,aAAO,QAAQ,CAAC;AAAA,IAClB;AAAA,EACF;AACA,SAAO;AACT;AAEA,SAAS,mBACP,QACsB;AACtB,SAAO;AAAA,IACL,YAAY;AAAA,MACV;AAAA,QACE,OAAO;AAAA,QACP,cAAc;AAAA,QACd,SAAS;AAAA,UACP,MAAM;AAAA,UACN,SAAS;AAAA,YACP;AAAA,cACE,MAAM,OAAO,WAAW,WAAW,SAAS,OAAO;AAAA,YACrD;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;AAEO,SAAS,cACd,QAC0C;AAC1C,aAAO;AAAA,IACL;AAAA,MACE,MAAM,SAAS;AAAA,OACZ,SAAS,OAFd;AAAA,MAGE,cAAc,SAAS;AAAA,IACzB;AAAA,IACA,CAAO,YAAY;AACjB,YAAM,SAAS,MAAM,OAAO,MAAM,eAAe;AAAA,QAC/C,kBAAkB,OAAO;AAAA,MAC3B;AACA,aAAO,mBAAmB,MAAM;AAAA,IAClC;AAAA,EACF;AACF;","names":[]} \ No newline at end of file diff --git a/plugins/openai/lib/whisper.mjs b/plugins/openai/lib/whisper.mjs new file mode 100644 index 00000000..93ab6740 --- /dev/null +++ b/plugins/openai/lib/whisper.mjs @@ -0,0 +1,112 @@ +import { + __async, + __spreadProps, + __spreadValues +} from "./chunk-WFI2LP4G.mjs"; +import { Message } from "@genkit-ai/ai"; +import { + GenerationCommonConfigSchema, + defineModel, + modelRef +} from
"@genkit-ai/ai/model"; +import { z } from "zod"; +const Whisper1ConfigSchema = GenerationCommonConfigSchema.extend({ + language: z.string().optional(), + timestamp_granularities: z.array(z.enum(["word", "segment"])).optional(), + response_format: z.enum(["json", "text", "srt", "verbose_json", "vtt"]).optional() +}); +const whisper1 = modelRef({ + name: "openai/whisper-1", + info: { + label: "OpenAI - Whisper", + supports: { + media: true, + output: ["text", "json"], + multiturn: false, + systemRole: false, + tools: false + } + }, + configSchema: Whisper1ConfigSchema +}); +function toWhisper1Request(request) { + var _a, _b, _c, _d, _e, _f; + const message = new Message(request.messages[0]); + const media = message.media(); + if (!(media == null ? void 0 : media.url)) { + throw new Error("No media found in the request"); + } + const mediaBuffer = Buffer.from( + media.url.slice(media.url.indexOf(",") + 1), + "base64" + ); + const mediaFile = new File([mediaBuffer], "input", { + type: (_a = media.contentType) != null ? _a : media.url.slice("data:".length, media.url.indexOf(";")) + }); + const options = { + model: "whisper-1", + file: mediaFile, + prompt: message.text(), + temperature: (_b = request.config) == null ? void 0 : _b.temperature, + language: (_c = request.config) == null ? void 0 : _c.language, + timestamp_granularities: (_d = request.config) == null ? void 0 : _d.timestamp_granularities + }; + const outputFormat = (_e = request.output) == null ? void 0 : _e.format; + const customFormat = (_f = request.config) == null ? void 0 : _f.response_format; + if (outputFormat && customFormat) { + if (outputFormat === "json" && customFormat !== "json" && customFormat !== "verbose_json") { + throw new Error( + `Custom response format ${customFormat} is not compatible with output format ${outputFormat}` + ); + } + } + if (outputFormat === "media") { + throw new Error(`Output format ${outputFormat} is not supported.`); + } + options.response_format = customFormat || outputFormat || "text"; + for (const k in options) { + if (options[k] === void 0) { + delete options[k]; + } + } + return options; +} +function toGenerateResponse(result) { + return { + candidates: [ + { + index: 0, + finishReason: "stop", + message: { + role: "model", + content: [ + { + text: typeof result === "string" ? 
result : result.text + } + ] + } + } + ] + }; +} +function whisper1Model(client) { + return defineModel( + __spreadProps(__spreadValues({ + name: whisper1.name + }, whisper1.info), { + configSchema: whisper1.configSchema + }), + (request) => __async(this, null, function* () { + const result = yield client.audio.transcriptions.create( + toWhisper1Request(request) + ); + return toGenerateResponse(result); + }) + ); +} +export { + Whisper1ConfigSchema, + whisper1, + whisper1Model +}; +//# sourceMappingURL=whisper.mjs.map \ No newline at end of file diff --git a/plugins/openai/lib/whisper.mjs.map b/plugins/openai/lib/whisper.mjs.map new file mode 100644 index 00000000..f441655d --- /dev/null +++ b/plugins/openai/lib/whisper.mjs.map @@ -0,0 +1 @@ +{"version":3,"sources":["../src/whisper.ts"],"sourcesContent":["/**\n * Copyright 2024 The Fire Company\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { Message } from '@genkit-ai/ai';\nimport {\n GenerationCommonConfigSchema,\n defineModel,\n modelRef,\n type GenerateRequest,\n type GenerateResponseData,\n type ModelAction,\n} from '@genkit-ai/ai/model';\nimport OpenAI from 'openai';\nimport {\n type TranscriptionCreateParams,\n type Transcription,\n} from 'openai/resources/audio/index.mjs';\nimport { z } from 'zod';\n\nexport const Whisper1ConfigSchema = GenerationCommonConfigSchema.extend({\n language: z.string().optional(),\n timestamp_granularities: z.array(z.enum(['word', 'segment'])).optional(),\n response_format: z\n .enum(['json', 'text', 'srt', 'verbose_json', 'vtt'])\n .optional(),\n});\n\nexport const whisper1 = modelRef({\n name: 'openai/whisper-1',\n info: {\n label: 'OpenAI - Whisper',\n supports: {\n media: true,\n output: ['text', 'json'],\n multiturn: false,\n systemRole: false,\n tools: false,\n },\n },\n configSchema: Whisper1ConfigSchema,\n});\n\nfunction toWhisper1Request(\n request: GenerateRequest<typeof Whisper1ConfigSchema>\n): TranscriptionCreateParams {\n const message = new Message(request.messages[0]);\n const media = message.media();\n if (!media?.url) {\n throw new Error('No media found in the request');\n }\n const mediaBuffer = Buffer.from(\n media.url.slice(media.url.indexOf(',') + 1),\n 'base64'\n );\n const mediaFile = new File([mediaBuffer], 'input', {\n type:\n media.contentType ??\n media.url.slice('data:'.length, media.url.indexOf(';')),\n });\n const options: TranscriptionCreateParams = {\n model: 'whisper-1',\n file: mediaFile,\n prompt: message.text(),\n temperature: request.config?.temperature,\n language: request.config?.language,\n timestamp_granularities: request.config?.timestamp_granularities,\n };\n const outputFormat = request.output?.format;\n const customFormat = request.config?.response_format;\n if (outputFormat && customFormat) {\n if (\n outputFormat === 'json' &&\n customFormat !== 'json' &&\n customFormat !== 'verbose_json'\n ) {\n throw new Error(\n `Custom response format ${customFormat} is not compatible with output format ${outputFormat}`\n );\n }\n }\n if (outputFormat === 'media') {\n throw new Error(`Output format ${outputFormat} is not supported.`);\n }\n options.response_format = customFormat || outputFormat || 'text';\n for (const k in options) {\n if (options[k] === undefined) {\n delete options[k];\n }\n }\n return options;\n}\n\nfunction toGenerateResponse(\n result: Transcription | string\n): GenerateResponseData {\n return {\n candidates: [\n {\n index: 0,\n finishReason: 'stop',\n message: {\n role: 'model',\n content: [\n {\n text: typeof result === 'string' ? result : result.text,\n },\n ],\n },\n },\n ],\n };\n}\n\nexport function whisper1Model(\n client: OpenAI\n): ModelAction<typeof Whisper1ConfigSchema> {\n return defineModel(\n {\n name: whisper1.name,\n ...whisper1.info,\n configSchema: whisper1.configSchema,\n },\n async (request) => {\n const result = await client.audio.transcriptions.create(\n toWhisper1Request(request)\n );\n return toGenerateResponse(result);\n }\n );\n}\n"],"mappings":";;;;;AAgBA,SAAS,eAAe;AACxB;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,OAIK;AAMP,SAAS,SAAS;AAEX,MAAM,uBAAuB,6BAA6B,OAAO;AAAA,EACtE,UAAU,EAAE,OAAO,EAAE,SAAS;AAAA,EAC9B,yBAAyB,EAAE,MAAM,EAAE,KAAK,CAAC,QAAQ,SAAS,CAAC,CAAC,EAAE,SAAS;AAAA,EACvE,iBAAiB,EACd,KAAK,CAAC,QAAQ,QAAQ,OAAO,gBAAgB,KAAK,CAAC,EACnD,SAAS;AACd,CAAC;AAEM,MAAM,WAAW,SAAS;AAAA,EAC/B,MAAM;AAAA,EACN,MAAM;AAAA,IACJ,OAAO;AAAA,IACP,UAAU;AAAA,MACR,OAAO;AAAA,MACP,QAAQ,CAAC,QAAQ,MAAM;AAAA,MACvB,WAAW;AAAA,MACX,YAAY;AAAA,MACZ,OAAO;AAAA,IACT;AAAA,EACF;AAAA,EACA,cAAc;AAChB,CAAC;AAED,SAAS,kBACP,SAC2B;AAzD7B;AA0DE,QAAM,UAAU,IAAI,QAAQ,QAAQ,SAAS,CAAC,CAAC;AAC/C,QAAM,QAAQ,QAAQ,MAAM;AAC5B,MAAI,EAAC,+BAAO,MAAK;AACf,UAAM,IAAI,MAAM,+BAA+B;AAAA,EACjD;AACA,QAAM,cAAc,OAAO;AAAA,IACzB,MAAM,IAAI,MAAM,MAAM,IAAI,QAAQ,GAAG,IAAI,CAAC;AAAA,IAC1C;AAAA,EACF;AACA,QAAM,YAAY,IAAI,KAAK,CAAC,WAAW,GAAG,SAAS;AAAA,IACjD,OACE,WAAM,gBAAN,YACA,MAAM,IAAI,MAAM,QAAQ,QAAQ,MAAM,IAAI,QAAQ,GAAG,CAAC;AAAA,EAC1D,CAAC;AACD,QAAM,UAAqC;AAAA,IACzC,OAAO;AAAA,IACP,MAAM;AAAA,IACN,QAAQ,QAAQ,KAAK;AAAA,IACrB,cAAa,aAAQ,WAAR,mBAAgB;AAAA,IAC7B,WAAU,aAAQ,WAAR,mBAAgB;AAAA,IAC1B,0BAAyB,aAAQ,WAAR,mBAAgB;AAAA,EAC3C;AACA,QAAM,gBAAe,aAAQ,WAAR,mBAAgB;AACrC,QAAM,gBAAe,aAAQ,WAAR,mBAAgB;AACrC,MAAI,gBAAgB,cAAc;AAChC,QACE,iBAAiB,UACjB,iBAAiB,UACjB,iBAAiB,gBACjB;AACA,YAAM,IAAI;AAAA,QACR,0BAA0B,YAAY,yCAAyC,YAAY;AAAA,MAC7F;AAAA,IACF;AAAA,EACF;AACA,MAAI,iBAAiB,SAAS;AAC5B,UAAM,IAAI,MAAM,iBAAiB,YAAY,oBAAoB;AAAA,EACnE;AACA,UAAQ,kBAAkB,gBAAgB,gBAAgB;AAC1D,aAAW,KAAK,SAAS;AACvB,QAAI,QAAQ,CAAC,MAAM,QAAW;AAC5B,aAAO,QAAQ,CAAC;AAAA,IAClB;AAAA,EACF;AACA,SAAO;AACT;AAEA,SAAS,mBACP,QACsB;AACtB,SAAO;AAAA,IACL,YAAY;AAAA,MACV;AAAA,QACE,OAAO;AAAA,QACP,cAAc;AAAA,QACd,SAAS;AAAA,UACP,MAAM;AAAA,UACN,SAAS;AAAA,YACP;AAAA,cACE,MAAM,OAAO,WAAW,WAAW,SAAS,OAAO;AAAA,YACrD;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;AAEO,SAAS,cACd,QAC0C;AAC1C,SAAO;AAAA,IACL;AAAA,MACE,MAAM,SAAS;AAAA,OACZ,SAAS,OAFd;AAAA,MAGE,cAAc,SAAS;AAAA,IACzB;AAAA,IACA,CAAO,YAAY;AACjB,YAAM,SAAS,MAAM,OAAO,MAAM,eAAe;AAAA,QAC/C,kBAAkB,OAAO;AAAA,MAC3B;AACA,aAAO,mBAAmB,MAAM;AAAA,IAClC;AAAA,EACF;AACF;","names":[]} \ No newline at end of file
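---

Usage note (illustrative, not part of the diff): the tts and whisper entry points built above can be exercised end to end. The sketch below is an assumption-laden example, not the plugin's documented API surface: it assumes a Genkit 0.5-era setup where the plugin is registered via configureGenkit, that the package is consumed under the name genkitx-openai, that OPENAI_API_KEY is set in the environment, and that the media() and text() helpers exist on the response object in that era of @genkit-ai/ai. Adjust names to your actual config.

import { generate } from '@genkit-ai/ai';
import { configureGenkit } from '@genkit-ai/core';
import { openAI, tts1, whisper1 } from 'genkitx-openai';

// Register the OpenAI plugin; it reads OPENAI_API_KEY from the environment.
configureGenkit({ plugins: [openAI()] });

async function roundTrip() {
  // Text-to-speech: tts.ts maps the prompt text to SpeechCreateParams.input
  // and toGenerateResponse() returns the audio as a base64 data: URL.
  const speech = await generate({
    model: tts1,
    prompt: 'Hello from Genkit!',
    config: { voice: 'nova', response_format: 'mp3' },
  });
  const audio = speech.media(); // assumed helper; yields { url: 'data:audio/mpeg;base64,...' }

  // Speech-to-text: toWhisper1Request() expects the audio as a media part
  // on the first message, so pass it back in as a media prompt part.
  const transcript = await generate({
    model: whisper1,
    prompt: [{ media: { url: audio!.url, contentType: 'audio/mpeg' } }],
  });
  console.log(transcript.text());
}

roundTrip().catch(console.error);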