diff --git a/JS/edgechains/arakoodev/src/ai/src/lib/gemini/gemini.ts b/JS/edgechains/arakoodev/src/ai/src/lib/gemini/gemini.ts
index 5d27f8c1..13f1bfcd 100644
--- a/JS/edgechains/arakoodev/src/ai/src/lib/gemini/gemini.ts
+++ b/JS/edgechains/arakoodev/src/ai/src/lib/gemini/gemini.ts
@@ -1,5 +1,5 @@
 import axios from "axios";
-import { retry } from "@lifeomic/attempt"
+import { retry } from "@lifeomic/attempt";
 
 const url = "https://generativelanguage.googleapis.com/v1/models/gemini-pro:generateContent";
 
@@ -7,7 +7,11 @@ interface GeminiAIConstructionOptions {
 }
 
 type SafetyRating = {
-    category: "HARM_CATEGORY_SEXUALLY_EXPLICIT" | "HARM_CATEGORY_HATE_SPEECH" | "HARM_CATEGORY_HARASSMENT" | "HARM_CATEGORY_DANGEROUS_CONTENT";
+    category:
+        | "HARM_CATEGORY_SEXUALLY_EXPLICIT"
+        | "HARM_CATEGORY_HATE_SPEECH"
+        | "HARM_CATEGORY_HARASSMENT"
+        | "HARM_CATEGORY_DANGEROUS_CONTENT";
     probability: "NEGLIGIBLE" | "LOW" | "MEDIUM" | "HIGH";
 };
 
@@ -38,9 +42,7 @@ type Response = {
     usageMetadata: UsageMetadata;
 };
 
-
-type responseMimeType = "text/plain" | "application/json"
-
+type responseMimeType = "text/plain" | "application/json";
 
 interface GeminiAIChatOptions {
     model?: string;
@@ -49,7 +51,7 @@
     prompt: string;
     max_retry?: number;
    responseType?: responseMimeType;
-    delay?: number
+    delay?: number;
 }
 
 export class GeminiAI {
@@ -60,33 +62,36 @@ export class GeminiAI {
 
     async chat(chatOptions: GeminiAIChatOptions): Promise<Response> {
         let data = JSON.stringify({
-            "contents": [
+            contents: [
                 {
-                    "role": "user",
-                    "parts": [
+                    role: "user",
+                    parts: [
                         {
-                            "text": chatOptions.prompt
-                        }
-                    ]
-                }
-            ]
+                            text: chatOptions.prompt,
+                        },
+                    ],
+                },
+            ],
         });
         let config = {
-            method: 'post',
+            method: "post",
             maxBodyLength: Infinity,
             url,
             headers: {
-                'Content-Type': 'application/json',
-                'x-goog-api-key': this.apiKey
+                "Content-Type": "application/json",
+                "x-goog-api-key": this.apiKey,
             },
             temperature: chatOptions.temperature || "0.7",
             responseMimeType: chatOptions.responseType || "text/plain",
-            "max_output_tokens": chatOptions.max_output_tokens || 1024,
-            data: data
+            max_output_tokens: chatOptions.max_output_tokens || 1024,
+            data: data,
         };
 
-        return await retry(async () => {
-            return (await axios.request(config)).data;
-        }, { maxAttempts: chatOptions.max_retry || 3, delay: chatOptions.delay || 200 });
+        return await retry(
+            async () => {
+                return (await axios.request(config)).data;
+            },
+            { maxAttempts: chatOptions.max_retry || 3, delay: chatOptions.delay || 200 }
+        );
     }
 }
diff --git a/JS/edgechains/arakoodev/src/ai/src/lib/llama/llama.ts b/JS/edgechains/arakoodev/src/ai/src/lib/llama/llama.ts
index 52f5c6f1..a577fcd3 100644
--- a/JS/edgechains/arakoodev/src/ai/src/lib/llama/llama.ts
+++ b/JS/edgechains/arakoodev/src/ai/src/lib/llama/llama.ts
@@ -1,9 +1,8 @@
-
 import axios from "axios";
 import { role } from "../../types";
 import { retry } from "@lifeomic/attempt";
 
-const url = 'https://api.llama-api.com/chat/completions'
+const url = "https://api.llama-api.com/chat/completions";
 
 interface messageOption {
     role: role;
@@ -18,14 +17,15 @@ interface llamaChatOptions {
     temperature?: number;
     prompt?: string;
     messages?: messageOption[];
-    stream?: boolean
+    stream?: boolean;
     max_retry?: number;
-    delay?: number
-}[]
+    delay?: number;
+}
+[];
 
 export class LlamaAI {
-    apiKey: string
-    queue: string[]
+    apiKey: string;
+    queue: string[];
     constructor({ apiKey }: { apiKey: string }) {
         this.apiKey = apiKey;
         this.queue = [];
@@ -32,21 +32,20 @@ export class LlamaAI {
     }
 
     async makeRequest(chatOptions: llamaChatOptions) {
         try {
-            return await retry(async () => {
-
-                return await axios
-                    .post(
+            return await retry(
+                async () => {
+                    return await axios.post(
                         url,
                         {
                             model: chatOptions.model || "llama-13b-chat",
                             messages: chatOptions.prompt
                                 ? [
-                                    {
-                                        role: chatOptions.role || "user",
-                                        content: chatOptions.prompt,
-                                    },
-                                ]
+                                      {
+                                          role: chatOptions.role || "user",
+                                          content: chatOptions.prompt,
+                                      },
+                                  ]
                                 : chatOptions.messages,
                             max_tokens: chatOptions.max_tokens || 1024,
                             stream: chatOptions.stream || false,
@@ -55,10 +54,12 @@ export class LlamaAI {
                     {
                         headers: { Authorization: "Bearer " + this.apiKey },
                     }
-                )
-            }, { maxAttempts: chatOptions.max_retry || 3, delay: chatOptions.delay || 200 });
+                );
+            },
+            { maxAttempts: chatOptions.max_retry || 3, delay: chatOptions.delay || 200 }
+        );
         } catch (error: any) {
-            console.log(error)
+            console.log(error);
             throw new Error(`Error while making request: ${error.message}`);
         }
     }
@@ -74,7 +75,7 @@
     async *getSequences() {
         while (this.queue.length > 0) {
             yield this.queue.shift();
-            await new Promise(resolve => setTimeout(resolve, 100));
+            await new Promise((resolve) => setTimeout(resolve, 100));
         }
     }
 
diff --git a/JS/edgechains/arakoodev/src/ai/src/types/index.ts b/JS/edgechains/arakoodev/src/ai/src/types/index.ts
index 27a1a8ed..33485f21 100644
--- a/JS/edgechains/arakoodev/src/ai/src/types/index.ts
+++ b/JS/edgechains/arakoodev/src/ai/src/types/index.ts
@@ -21,4 +21,4 @@ export type ChatModel =
     | "gpt-3.5-turbo-0125"
     | "gpt-3.5-turbo-16k-0613";
 
-export type role = "user" | "assistant" | "system";
\ No newline at end of file
+export type role = "user" | "assistant" | "system";
diff --git a/package.json b/package.json
index 0c465121..d59631d6 100644
--- a/package.json
+++ b/package.json
@@ -1,8 +1,8 @@
 {
     "dependencies": {
         "@microsoft/eslint-formatter-sarif": "^3.1.0",
-        "@typescript-eslint/eslint-plugin": "^8.0.0",
-        "@typescript-eslint/parser": "^8.0.0",
+        "@typescript-eslint/eslint-plugin": "^8.1.0",
+        "@typescript-eslint/parser": "^8.1.0",
         "eslint": "^8.57.0",
         "eslint-config-google": "^0.14.0",
         "eslint-config-prettier": "^9.1.0",
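
The hunks above are formatting-only (quote style, semicolons, and argument layout), so the public surface of both clients is unchanged. As a quick sanity check, here is a minimal usage sketch; it is not part of the diff. The import paths mirror the files changed above, the GEMINI_API_KEY and LLAMA_API_KEY names are placeholder environment variables, and GeminiAI is assumed to take `{ apiKey }` in its constructor (the constructor itself sits outside the visible hunks; LlamaAI's is shown).

```ts
// Usage sketch (not part of the diff) for the two reformatted clients.
// Paths mirror the diffed files; adjust to your build layout.
import { GeminiAI } from "./JS/edgechains/arakoodev/src/ai/src/lib/gemini/gemini";
import { LlamaAI } from "./JS/edgechains/arakoodev/src/ai/src/lib/llama/llama";

async function main() {
    // chat() wraps the request in @lifeomic/attempt's retry: 3 attempts with a
    // 200 ms delay by default, overridable via max_retry and delay.
    const gemini = new GeminiAI({ apiKey: process.env.GEMINI_API_KEY! }); // constructor shape assumed
    const geminiResponse = await gemini.chat({
        prompt: "Summarize retry strategies in one sentence.",
    });
    console.log(geminiResponse.usageMetadata); // chat() resolves to the parsed Response body

    // makeRequest() turns a bare prompt into a single user message (or accepts a
    // messages array) and resolves to the raw axios response, so the body is in .data.
    const llama = new LlamaAI({ apiKey: process.env.LLAMA_API_KEY! });
    const llamaResponse = await llama.makeRequest({
        prompt: "Summarize retry strategies in one sentence.",
        max_tokens: 256,
    });
    console.log(llamaResponse.data);
}

main().catch(console.error);
```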