From 4f008573229f63c72e8a8d830ec85328a70cc027 Mon Sep 17 00:00:00 2001
From: Shyam Raghuwanshi
Date: Sat, 3 Aug 2024 15:43:16 +0530
Subject: [PATCH] feat/adding-llama-and-gemini-classes (#405)

* feat/adding-llama-and-gemini-classes

* fixing-gemini
---
 JS/edgechains/arakoodev/package.json          |   2 +-
 JS/edgechains/arakoodev/src/ai/src/index.ts   |   3 +
 .../arakoodev/src/ai/src/lib/gemini/gemini.ts |  92 +++++++++++++
 .../arakoodev/src/ai/src/lib/llama/llama.ts   | 103 ++++++++++++++
 .../src/lib/openai/openai.ts}                 |   8 +-
 .../src/tests/openAiEndpoints.test.ts         |   0
 .../src/tests/streaming.test.ts               |   0
 .../src/{openai => ai}/src/types/index.ts     |   2 +-
 .../arakoodev/src/openai/src/index.ts         |   2 -
 .../src/lib/streaming/OpenAiStreaming.ts      | 129 ------------------
 10 files changed, 204 insertions(+), 137 deletions(-)
 create mode 100644 JS/edgechains/arakoodev/src/ai/src/index.ts
 create mode 100644 JS/edgechains/arakoodev/src/ai/src/lib/gemini/gemini.ts
 create mode 100644 JS/edgechains/arakoodev/src/ai/src/lib/llama/llama.ts
 rename JS/edgechains/arakoodev/src/{openai/src/lib/endpoints/OpenAiEndpoint.ts => ai/src/lib/openai/openai.ts} (98%)
 rename JS/edgechains/arakoodev/src/{openai => ai}/src/tests/openAiEndpoints.test.ts (100%)
 rename JS/edgechains/arakoodev/src/{openai => ai}/src/tests/streaming.test.ts (100%)
 rename JS/edgechains/arakoodev/src/{openai => ai}/src/types/index.ts (91%)
 delete mode 100644 JS/edgechains/arakoodev/src/openai/src/index.ts
 delete mode 100644 JS/edgechains/arakoodev/src/openai/src/lib/streaming/OpenAiStreaming.ts

diff --git a/JS/edgechains/arakoodev/package.json b/JS/edgechains/arakoodev/package.json
index 5eafd3fdb..39739a2f7 100644
--- a/JS/edgechains/arakoodev/package.json
+++ b/JS/edgechains/arakoodev/package.json
@@ -6,7 +6,7 @@
         "dist"
     ],
     "exports": {
-        "./openai": "./dist/openai/src/index.js",
+        "./ai": "./dist/ai/src/index.js",
         "./vector-db": "./dist/vector-db/src/index.js",
         "./document-loader": "./dist/document-loader/src/index.js",
         "./splitter": "./dist/splitter/src/index.js",
diff --git a/JS/edgechains/arakoodev/src/ai/src/index.ts b/JS/edgechains/arakoodev/src/ai/src/index.ts
new file mode 100644
index 000000000..74f601aaa
--- /dev/null
+++ b/JS/edgechains/arakoodev/src/ai/src/index.ts
@@ -0,0 +1,3 @@
+export { OpenAI } from "./lib/openai/openai.js";
+export { GeminiAI } from "./lib/gemini/gemini.js";
+export { LlamaAI } from "./lib/llama/llama.js";
diff --git a/JS/edgechains/arakoodev/src/ai/src/lib/gemini/gemini.ts b/JS/edgechains/arakoodev/src/ai/src/lib/gemini/gemini.ts
new file mode 100644
index 000000000..5d27f8c11
--- /dev/null
+++ b/JS/edgechains/arakoodev/src/ai/src/lib/gemini/gemini.ts
@@ -0,0 +1,92 @@
+import axios from "axios";
+import { retry } from "@lifeomic/attempt";
+
+const url = "https://generativelanguage.googleapis.com/v1/models/gemini-pro:generateContent";
+
+interface GeminiAIConstructionOptions {
+    apiKey?: string;
+}
+
+type SafetyRating = {
+    category: "HARM_CATEGORY_SEXUALLY_EXPLICIT" | "HARM_CATEGORY_HATE_SPEECH" | "HARM_CATEGORY_HARASSMENT" | "HARM_CATEGORY_DANGEROUS_CONTENT";
+    probability: "NEGLIGIBLE" | "LOW" | "MEDIUM" | "HIGH";
+};
+
+type ContentPart = {
+    text: string;
+};
+
+type Content = {
+    parts: ContentPart[];
+    role: string;
+};
+
+type Candidate = {
+    content: Content;
+    finishReason: string;
+    index: number;
+    safetyRatings: SafetyRating[];
+};
+
+type UsageMetadata = {
+    promptTokenCount: number;
+    candidatesTokenCount: number;
+    totalTokenCount: number;
+};
+
+type Response = {
+    candidates: Candidate[];
+    usageMetadata: UsageMetadata;
+};
+
+type responseMimeType = "text/plain" | "application/json";
+
+interface GeminiAIChatOptions {
+    model?: string;
+    max_output_tokens?: number;
+    temperature?: number;
+    prompt: string;
+    max_retry?: number;
+    responseType?: responseMimeType;
+    delay?: number;
+}
+
+export class GeminiAI {
+    apiKey: string;
+
+    constructor(options: GeminiAIConstructionOptions) {
+        this.apiKey = options.apiKey || process.env.GEMINI_API_KEY || "";
+    }
+
+    async chat(chatOptions: GeminiAIChatOptions): Promise<Response> {
+        let data = JSON.stringify({
+            contents: [
+                {
+                    role: "user",
+                    parts: [{ text: chatOptions.prompt }],
+                },
+            ],
+            // Generation options belong in the request body, not in the axios config.
+            generationConfig: {
+                temperature: chatOptions.temperature ?? 0.7,
+                maxOutputTokens: chatOptions.max_output_tokens || 1024,
+                responseMimeType: chatOptions.responseType || "text/plain",
+            },
+        });
+
+        let config = {
+            method: "post",
+            maxBodyLength: Infinity,
+            url,
+            headers: {
+                "Content-Type": "application/json",
+                "x-goog-api-key": this.apiKey,
+            },
+            data: data,
+        };
+
+        return await retry(async () => {
+            return (await axios.request(config)).data;
+        }, { maxAttempts: chatOptions.max_retry || 3, delay: chatOptions.delay || 200 });
+    }
+}
diff --git a/JS/edgechains/arakoodev/src/ai/src/lib/llama/llama.ts b/JS/edgechains/arakoodev/src/ai/src/lib/llama/llama.ts
new file mode 100644
index 000000000..52f5c6f11
--- /dev/null
+++ b/JS/edgechains/arakoodev/src/ai/src/lib/llama/llama.ts
@@ -0,0 +1,103 @@
+
+import axios from "axios";
+import { role } from "../../types";
+import { retry } from "@lifeomic/attempt";
+
+const url = "https://api.llama-api.com/chat/completions";
+
+interface messageOption {
+    role: role;
+    content: string;
+    name?: string;
+}
+
+interface llamaChatOptions {
+    model?: string;
+    role?: role;
+    max_tokens?: number;
+    temperature?: number;
+    prompt?: string;
+    messages?: messageOption[];
+    stream?: boolean;
+    max_retry?: number;
+    delay?: number;
+}
+
+export class LlamaAI {
+    apiKey: string;
+    queue: string[];
+
+    constructor({ apiKey }: { apiKey: string }) {
+        this.apiKey = apiKey;
+        this.queue = [];
+    }
+
+    async makeRequest(chatOptions: llamaChatOptions) {
+        try {
+            return await retry(async () => {
+                return await axios.post(
+                    url,
+                    {
+                        model: chatOptions.model || "llama-13b-chat",
+                        messages: chatOptions.prompt
+                            ? [
+                                {
+                                    role: chatOptions.role || "user",
+                                    content: chatOptions.prompt,
+                                },
+                            ]
+                            : chatOptions.messages,
+                        max_tokens: chatOptions.max_tokens || 1024,
+                        stream: chatOptions.stream || false,
+                        temperature: chatOptions.temperature || 0.7,
+                    },
+                    {
+                        headers: { Authorization: "Bearer " + this.apiKey },
+                    }
+                );
+            }, { maxAttempts: chatOptions.max_retry || 3, delay: chatOptions.delay || 200 });
+        } catch (error: any) {
+            console.log(error);
+            throw new Error(`Error while making request: ${error.message}`);
+        }
+    }
+
+    async _runStreamForJupyter(apiRequestJson: llamaChatOptions) {
+        const response = await this.makeRequest(apiRequestJson);
+
+        for (const chunk of response.data) {
+            this.queue.push(chunk);
+        }
+    }
+
+    async *getSequences() {
+        while (this.queue.length > 0) {
+            yield this.queue.shift();
+            await new Promise((resolve) => setTimeout(resolve, 100));
+        }
+    }
+
+    async runStream(apiRequestJson: llamaChatOptions) {
+        await this._runStreamForJupyter(apiRequestJson);
+        // Return the generator so callers can consume the buffered chunks.
+        return this.getSequences();
+    }
+
+    async runSync(apiRequestJson: llamaChatOptions) {
+        const response = await this.makeRequest(apiRequestJson);
+
+        if (response.status !== 200) {
+            throw new Error(`POST ${response.status} ${response.data.detail}`);
+        }
+
+        return response.data;
+    }
+
+    chat(chatOptions: llamaChatOptions) {
+        if (chatOptions.stream) {
+            return this.runStream(chatOptions);
+        } else {
+            return this.runSync(chatOptions);
+        }
+    }
+}
diff --git a/JS/edgechains/arakoodev/src/openai/src/lib/endpoints/OpenAiEndpoint.ts b/JS/edgechains/arakoodev/src/ai/src/lib/openai/openai.ts
similarity index 98%
rename from JS/edgechains/arakoodev/src/openai/src/lib/endpoints/OpenAiEndpoint.ts
rename to JS/edgechains/arakoodev/src/ai/src/lib/openai/openai.ts
index f0590124f..91bc1af61 100644
--- a/JS/edgechains/arakoodev/src/openai/src/lib/endpoints/OpenAiEndpoint.ts
+++ b/JS/edgechains/arakoodev/src/ai/src/lib/openai/openai.ts
@@ -63,7 +63,7 @@ export class OpenAI {
     }
 
     async chat(chatOptions: OpenAIChatOptions): Promise<any> {
-        const responce = await axios
+        const response = await axios
             .post(
                 openAI_url,
                 {
@@ -99,13 +99,13 @@ export class OpenAI {
                 console.log("Error creating request:", error.message);
             }
         });
-        return responce[0].message;
+        return response[0].message;
     }
 
     async chatWithFunction(
         chatOptions: chatWithFunctionOptions
    ): Promise<any> {
-        const responce = await axios
+        const response = await axios
             .post(
                 openAI_url,
                 {
@@ -143,7 +143,7 @@ export class OpenAI {
                 console.log("Error creating request:", error.message);
             }
         });
-        return responce[0].message;
+        return response[0].message;
     }
 
     async generateEmbeddings(resp): Promise<any> {
diff --git a/JS/edgechains/arakoodev/src/openai/src/tests/openAiEndpoints.test.ts b/JS/edgechains/arakoodev/src/ai/src/tests/openAiEndpoints.test.ts
similarity index 100%
rename from JS/edgechains/arakoodev/src/openai/src/tests/openAiEndpoints.test.ts
rename to JS/edgechains/arakoodev/src/ai/src/tests/openAiEndpoints.test.ts
diff --git a/JS/edgechains/arakoodev/src/openai/src/tests/streaming.test.ts b/JS/edgechains/arakoodev/src/ai/src/tests/streaming.test.ts
similarity index 100%
rename from JS/edgechains/arakoodev/src/openai/src/tests/streaming.test.ts
rename to JS/edgechains/arakoodev/src/ai/src/tests/streaming.test.ts
diff --git a/JS/edgechains/arakoodev/src/openai/src/types/index.ts b/JS/edgechains/arakoodev/src/ai/src/types/index.ts
similarity index 91%
rename from JS/edgechains/arakoodev/src/openai/src/types/index.ts
rename to JS/edgechains/arakoodev/src/ai/src/types/index.ts
index 33485f21a..27a1a8ed1 100644
--- a/JS/edgechains/arakoodev/src/openai/src/types/index.ts
+++ b/JS/edgechains/arakoodev/src/ai/src/types/index.ts
@@ -21,4 +21,4 @@ export type ChatModel =
     | "gpt-3.5-turbo-0125"
     | "gpt-3.5-turbo-16k-0613";
 
-export type role = "user" | "assistant" | "system";
+export type role = "user" | "assistant" | "system";
\ No newline at end of file
diff --git a/JS/edgechains/arakoodev/src/openai/src/index.ts b/JS/edgechains/arakoodev/src/openai/src/index.ts
deleted file mode 100644
index f4afbfb01..000000000
--- a/JS/edgechains/arakoodev/src/openai/src/index.ts
+++ /dev/null
@@ -1,2 +0,0 @@
-export { OpenAI } from "./lib/endpoints/OpenAiEndpoint.js";
-export { Stream } from "./lib/streaming/OpenAiStreaming.js";
diff --git a/JS/edgechains/arakoodev/src/openai/src/lib/streaming/OpenAiStreaming.ts b/JS/edgechains/arakoodev/src/openai/src/lib/streaming/OpenAiStreaming.ts
deleted file mode 100644
index 5fc508b1a..000000000
--- a/JS/edgechains/arakoodev/src/openai/src/lib/streaming/OpenAiStreaming.ts
+++ /dev/null
@@ -1,129 +0,0 @@
-//@ts-ignore
-import { createParser, ParsedEvent, ReconnectInterval } from "eventsource-parser";
-
-export interface OpenAIStreamPayload {
-    model?: string;
-    OpenApiKey?: string;
-    temperature?: number;
-    top_p?: number;
-    frequency_penalty?: number;
-    presence_penalty?: number;
-    max_tokens?: number;
-    stream?: boolean;
-    n?: number;
-}
-
-export class Stream {
-    model?: string;
-    OpenApiKey?: string;
-    temperature?: number;
-    top_p?: number;
-    frequency_penalty?: number;
-    presence_penalty?: number;
-    max_tokens?: number;
-    stream?: boolean;
-    n?: number;
-    constructor(options: OpenAIStreamPayload = {}) {
-        this.model = options.model || "gpt-3.5-turbo";
-        this.OpenApiKey = options.OpenApiKey || process.env.OPENAI_API_KEY || "";
-        this.temperature = options.temperature || 0.7;
-        this.top_p = options.top_p || 1;
-        this.frequency_penalty = options.frequency_penalty || 0;
-        this.presence_penalty = options.presence_penalty || 0;
-        this.max_tokens = options.max_tokens || 500;
-        this.stream = options.stream || true;
-        this.n = options.n || 1;
-    }
-
-    public encoder = new TextEncoder();
-    public decoder = new TextDecoder();
-
-    async OpenAIStream(prompt: string): Promise<any> {
-        try {
-            const res = await fetch("https://api.openai.com/v1/chat/completions", {
-                method: "POST",
-                headers: {
-                    Authorization: `Bearer ${this.OpenApiKey}`,
-                    "content-type": "application/json",
-                },
-                body: JSON.stringify({
-                    model: this.model,
-                    messages: [{ role: "user", content: prompt }],
-                    stream: this.stream,
-                    temperature: this.temperature,
-                    top_p: this.top_p,
-                    n: this.n,
-                    presence_penalty: this.presence_penalty,
-                    frequency_penalty: this.frequency_penalty,
-                    max_tokens: this.max_tokens,
-                }),
-            });
-            const readableStream = new ReadableStream({
-                start: async (controller) => {
-                    // callback
-                    const onParse = (event: ParsedEvent | ReconnectInterval) => {
-                        if (event.type === "event") {
-                            const data = event.data;
-                            controller.enqueue(this.encoder.encode(data));
-                        }
-                    };
-
-                    // optimistic error handling
-                    if (res.status !== 200) {
-                        const data = {
-                            status: res.status,
-                            statusText: res.statusText,
-                            body: await res.text(),
-                        };
-                        console.log(`Error: recieved non-200 status code, ${JSON.stringify(data)}`);
-                        controller.close();
-                        return;
-                    }
-
-                    // stream response (SSE) from OpenAI may be fragmented into multiple chunks
-                    // this ensures we properly read chunks and invoke an event for each SSE event stream
-                    const parser = createParser(onParse);
-                    // https://web.dev/streams/#asynchronous-iteration
-                    for await (const chunk of res.body as any) {
-                        parser.feed(this.decoder.decode(chunk));
-                    }
-                },
-            });
-
-            let counter = 0;
-            const transformStream = new TransformStream({
-                transform: async (chunk, controller) => {
-                    const data = this.decoder.decode(chunk);
-                    // https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream
-                    if (data === "[DONE]") {
-                        controller.terminate();
-                        return;
-                    }
-                    try {
-                        const json = JSON.parse(data);
-                        const text = json.choices[0].delta?.content || "";
-                        if (counter < 2 && (text.match(/\n/) || []).length) {
-                            // this is a prefix character (i.e., "\n\n"), do nothing
-                            return;
-                        }
-                        // stream transformed JSON resposne as SSE
-                        const payload = { text: text };
-                        // https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#event_stream_format
-                        // controller.enqueue(
-                        //     this.encoder.encode(`data: ${JSON.stringify(payload)}\n\n`)
-                        // );
-                        controller.enqueue(payload.text);
-                        counter++;
-                    } catch (e) {
-                        // maybe parse error
-                        console.log(e);
-                        controller.error(e);
-                    }
-                },
-            });
-
-            return readableStream.pipeThrough(transformStream).getReader();
-        } catch (error) {
-            console.log(error);
-        }
-    }
-}
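
Note for downstream users: this patch replaces the package's "./openai" subpath export with "./ai", which now also exposes the Gemini and Llama clients, and it deletes the Stream class along with OpenAiStreaming.ts. A minimal migration sketch; the package specifier "arakoodev" is an assumption, since the package's "name" field is outside this hunk:

// Before this patch (no longer resolves; Stream has no replacement):
// import { OpenAI, Stream } from "arakoodev/openai";

// After this patch: one entry point for all three clients.
import { OpenAI, GeminiAI, LlamaAI } from "arakoodev/ai";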
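A minimal usage sketch for the new GeminiAI class, under the same package-name assumption; the prompt is a placeholder and the response access follows the Response type declared in gemini.ts:

import { GeminiAI } from "arakoodev/ai";

async function main() {
    // Falls back to process.env.GEMINI_API_KEY when no key is passed.
    const gemini = new GeminiAI({});

    // chat() retries up to max_retry times (default 3), waiting `delay` ms
    // (default 200) between attempts, and resolves with the raw
    // generateContent response.
    const res = await gemini.chat({
        prompt: "Summarize this patch in one sentence.",
        temperature: 0.5,
        max_output_tokens: 256,
    });

    console.log(res.candidates[0].content.parts[0].text);
}

main().catch(console.error);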
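And a sketch of the two LlamaAI paths; the OpenAI-style "choices" shape of the llama-api.com response is an assumption, not something this diff guarantees:

import { LlamaAI } from "arakoodev/ai";

async function main() {
    const llama = new LlamaAI({ apiKey: process.env.LLAMA_API_KEY ?? "" });

    // Non-streaming: chat() delegates to runSync(), which resolves with the
    // chat/completions payload.
    const completion = await llama.chat({ prompt: "Hello!", max_tokens: 128 });
    console.log(completion.choices?.[0]?.message?.content); // assumed shape

    // Streaming: chat() resolves with the async generator returned by
    // runStream(), which drains the queue filled by _runStreamForJupyter().
    const stream = await llama.chat({ prompt: "Hello!", stream: true });
    for await (const chunk of stream as AsyncGenerator<string | undefined>) {
        if (chunk) process.stdout.write(String(chunk));
    }
}

main().catch(console.error);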