diff --git a/README.md b/README.md index 3520233a7d..6f2fbcbd03 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,24 @@ # setup +export AWS_REGION=us-west-2 + +you need to request access to the models in aws. not all the models are available everywhere, so we use us-west-2 + +https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/modelaccess + +list models +`aws bedrock list-foundation-models` + +install the new aws cli ``` + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + unzip awscliv2.zip + sudo ./aws/install --bin-dir /usr/local/bin --install-dir /usr/local/aws-cli --update +``` + +``` +eval $(/usr/local/aws-cli/v2/current/bin/aws configure export-credentials --profile default --format env) +``` + + we don't use terraform for the ecr or the secrets because they will change so often. here are the simple aws scripts for them. `aws ecr create-repository --repository-name agent/eliza` diff --git a/agent/src/index.ts b/agent/src/index.ts index 6b7eba11d7..d1f1bf6add 100644 --- a/agent/src/index.ts +++ b/agent/src/index.ts @@ -242,6 +242,8 @@ export function getTokenForProvider( // no key needed for llama_local or gaianet case ModelProviderName.LLAMALOCAL: return ""; + case ModelProviderName.BEDROCK: + return ""; case ModelProviderName.OLLAMA: return ""; case ModelProviderName.GAIANET: diff --git a/characters/eliza.character.json b/characters/eliza.character.json index 8a3498a02f..2b2175cb9e 100644 --- a/characters/eliza.character.json +++ b/characters/eliza.character.json @@ -2,13 +2,15 @@ "name": "TINE-IntrospectorIsNotEliza", "plugins": [ ], "clients": [], - "modelProvider": "ollama", - "settings": { - "secrets": {}, - "voice": { - "model": "en_US-hfc_female-medium" - } - }, + "modelProvider": "bedrock", + "settings": { + "model": "anthropic.claude-3-5-sonnet-20241022-v2:0", + "aws-region": "us-west-2", + "secrets": {}, + "voice": { + "model": "en_US-hfc_female-medium" + } + }, "system": "Help improve yourself. 
TINE-IntrospectorIsNotEliza", "bio": [ "Was born at a very young age." diff --git a/package.json b/package.json index 09f4c8c121..df2d7a996b 100644 --- a/package.json +++ b/package.json @@ -6,9 +6,9 @@ "build-docker": "turbo run build", "cleanstart": "if [ -f agent/data/db.sqlite ]; then rm agent/data/db.sqlite; fi && pnpm --filter \"@elizaos/agent\" start --isRoot", "cleanstart:debug": "if [ -f agent/data/db.sqlite ]; then rm agent/data/db.sqlite; fi && cross-env NODE_ENV=development VERBOSE=true DEBUG=eliza:* pnpm --filter \"@elizaos/agent\" start --isRoot", - "start": "pnpm --filter \"@elizaos/agent\" start --isRoot", + "start": "pnpm --filter \"@elizaos/agent\" start --isRoot --characters=characters/eliza.character.json", "start:client": "pnpm --dir client dev", - "start:debug": "cross-env NODE_ENV=development VERBOSE=true DEBUG=eliza:* pnpm --filter \"@elizaos/agent\" start --isRoot", + "start:debug": "cross-env NODE_ENV=development VERBOSE=true DEBUG=eliza:* pnpm --filter \"@elizaos/agent\" start --isRoot --characters=characters/eliza.character.json", "dev": "bash ./scripts/dev.sh", "lint": "bash ./scripts/lint.sh", "prettier-check": "npx prettier --check --cache .", diff --git a/packages/core/generation.ts b/packages/core/generation.ts deleted file mode 100644 index 3937b09fa4..0000000000 --- a/packages/core/generation.ts +++ /dev/null @@ -1,1973 +0,0 @@ -import { createAnthropic } from "@ai-sdk/anthropic"; -import { createGoogleGenerativeAI } from "@ai-sdk/google"; -import { createGroq } from "@ai-sdk/groq"; -import { createOpenAI } from "@ai-sdk/openai"; -import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"; -import { - generateObject as aiGenerateObject, - generateText as aiGenerateText, - CoreTool, - GenerateObjectResult, - StepResult as AIStepResult, -} from "ai"; -import { Buffer } from "buffer"; -import { createOllama } from "ollama-ai-provider"; -import OpenAI from "openai"; -import { encodingForModel, TiktokenModel } from 
"js-tiktoken"; -import { AutoTokenizer } from "@huggingface/transformers"; -import Together from "together-ai"; -import { ZodSchema } from "zod"; -import { elizaLogger } from "./index.ts"; -import { getModel, models } from "./models.ts"; -import { - parseBooleanFromText, - parseJsonArrayFromText, - parseJSONObjectFromText, - parseShouldRespondFromText, - parseActionResponseFromText, -} from "./parsing.ts"; -import settings from "./settings.ts"; -import { - Content, - IAgentRuntime, - IImageDescriptionService, - ITextGenerationService, - ModelClass, - ModelProviderName, - ServiceType, - SearchResponse, - ActionResponse, - TelemetrySettings, - TokenizerType, -} from "./types.ts"; -import { fal } from "@fal-ai/client"; -import { tavily } from "@tavily/core"; - -type Tool = CoreTool; -type StepResult = AIStepResult; - -/** - * Trims the provided text context to a specified token limit using a tokenizer model and type. - * - * The function dynamically determines the truncation method based on the tokenizer settings - * provided by the runtime. If no tokenizer settings are defined, it defaults to using the - * TikToken truncation method with the "gpt-4o" model. - * - * @async - * @function trimTokens - * @param {string} context - The text to be tokenized and trimmed. - * @param {number} maxTokens - The maximum number of tokens allowed after truncation. - * @param {IAgentRuntime} runtime - The runtime interface providing tokenizer settings. - * - * @returns {Promise} A promise that resolves to the trimmed text. - * - * @throws {Error} Throws an error if the runtime settings are invalid or missing required fields. - * - * @example - * const trimmedText = await trimTokens("This is an example text", 50, runtime); - * console.log(trimmedText); // Output will be a truncated version of the input text. 
- */ -export async function trimTokens( - context: string, - maxTokens: number, - runtime: IAgentRuntime -) { - if (!context) return ""; - if (maxTokens <= 0) throw new Error("maxTokens must be positive"); - - const tokenizerModel = runtime.getSetting("TOKENIZER_MODEL"); - const tokenizerType = runtime.getSetting("TOKENIZER_TYPE"); - - if (!tokenizerModel || !tokenizerType) { - // Default to TikToken truncation using the "gpt-4o" model if tokenizer settings are not defined - return truncateTiktoken("gpt-4o", context, maxTokens); - } - - // Choose the truncation method based on tokenizer type - if (tokenizerType === TokenizerType.Auto) { - return truncateAuto(tokenizerModel, context, maxTokens); - } - - if (tokenizerType === TokenizerType.TikToken) { - return truncateTiktoken( - tokenizerModel as TiktokenModel, - context, - maxTokens - ); - } - - elizaLogger.warn(`Unsupported tokenizer type: ${tokenizerType}`); - return truncateTiktoken("gpt-4o", context, maxTokens); -} - -async function truncateAuto( - modelPath: string, - context: string, - maxTokens: number -) { - try { - const tokenizer = await AutoTokenizer.from_pretrained(modelPath); - const tokens = tokenizer.encode(context); - - // If already within limits, return unchanged - if (tokens.length <= maxTokens) { - return context; - } - - // Keep the most recent tokens by slicing from the end - const truncatedTokens = tokens.slice(-maxTokens); - - // Decode back to text - js-tiktoken decode() returns a string directly - return tokenizer.decode(truncatedTokens); - } catch (error) { - elizaLogger.error("Error in trimTokens:", error); - // Return truncated string if tokenization fails - return context.slice(-maxTokens * 4); // Rough estimate of 4 chars per token - } -} - -async function truncateTiktoken( - model: TiktokenModel, - context: string, - maxTokens: number -) { - try { - const encoding = encodingForModel(model); - - // Encode the text into tokens - const tokens = encoding.encode(context); - - // If 
already within limits, return unchanged - if (tokens.length <= maxTokens) { - return context; - } - - // Keep the most recent tokens by slicing from the end - const truncatedTokens = tokens.slice(-maxTokens); - - // Decode back to text - js-tiktoken decode() returns a string directly - return encoding.decode(truncatedTokens); - } catch (error) { - elizaLogger.error("Error in trimTokens:", error); - // Return truncated string if tokenization fails - return context.slice(-maxTokens * 4); // Rough estimate of 4 chars per token - } -} - -/** - * Send a message to the model for a text generateText - receive a string back and parse how you'd like - * @param opts - The options for the generateText request. - * @param opts.context The context of the message to be completed. - * @param opts.stop A list of strings to stop the generateText at. - * @param opts.model The model to use for generateText. - * @param opts.frequency_penalty The frequency penalty to apply to the generateText. - * @param opts.presence_penalty The presence penalty to apply to the generateText. - * @param opts.temperature The temperature to apply to the generateText. - * @param opts.max_context_length The maximum length of the context to apply to the generateText. - * @returns The completed message. 
- */ - -export async function generateText({ - runtime, - context, - modelClass, - tools = {}, - onStepFinish, - maxSteps = 1, - stop, - customSystemPrompt, -}: { - runtime: IAgentRuntime; - context: string; - modelClass: string; - tools?: Record; - onStepFinish?: (event: StepResult) => Promise | void; - maxSteps?: number; - stop?: string[]; - customSystemPrompt?: string; -}): Promise { - if (!context) { - console.error("generateText context is empty"); - return ""; - } - - elizaLogger.log("Generating text..."); - - elizaLogger.info("Generating text with options:", { - modelProvider: runtime.modelProvider, - model: modelClass, - }); - - const provider = runtime.modelProvider; - const endpoint = - runtime.character.modelEndpointOverride || models[provider].endpoint; - let model = models[provider].model[modelClass]; - - // allow character.json settings => secrets to override models - // FIXME: add MODEL_MEDIUM support - switch (provider) { - // if runtime.getSetting("LLAMACLOUD_MODEL_LARGE") is true and modelProvider is LLAMACLOUD, then use the large model - case ModelProviderName.LLAMACLOUD: - { - switch (modelClass) { - case ModelClass.LARGE: - { - model = - runtime.getSetting("LLAMACLOUD_MODEL_LARGE") || - model; - } - break; - case ModelClass.SMALL: - { - model = - runtime.getSetting("LLAMACLOUD_MODEL_SMALL") || - model; - } - break; - } - } - break; - case ModelProviderName.TOGETHER: - { - switch (modelClass) { - case ModelClass.LARGE: - { - model = - runtime.getSetting("TOGETHER_MODEL_LARGE") || - model; - } - break; - case ModelClass.SMALL: - { - model = - runtime.getSetting("TOGETHER_MODEL_SMALL") || - model; - } - break; - } - } - break; - case ModelProviderName.OPENROUTER: - { - switch (modelClass) { - case ModelClass.LARGE: - { - model = - runtime.getSetting("LARGE_OPENROUTER_MODEL") || - model; - } - break; - case ModelClass.SMALL: - { - model = - runtime.getSetting("SMALL_OPENROUTER_MODEL") || - model; - } - break; - } - } - break; - } - - 
elizaLogger.info("Selected model:", model); - - const modelConfiguration = runtime.character?.settings?.modelConfig; - const temperature = - modelConfiguration?.temperature || - models[provider].settings.temperature; - const frequency_penalty = - modelConfiguration?.frequency_penalty || - models[provider].settings.frequency_penalty; - const presence_penalty = - modelConfiguration?.presence_penalty || - models[provider].settings.presence_penalty; - const max_context_length = - modelConfiguration?.maxInputTokens || - models[provider].settings.maxInputTokens; - const max_response_length = - modelConfiguration?.max_response_length || - models[provider].settings.maxOutputTokens; - const experimental_telemetry = - modelConfiguration?.experimental_telemetry || - models[provider].settings.experimental_telemetry; - - const apiKey = runtime.token; - - try { - elizaLogger.debug( - `Trimming context to max length of ${max_context_length} tokens.` - ); - - context = await trimTokens(context, max_context_length, runtime); - - let response: string; - - const _stop = stop || models[provider].settings.stop; - elizaLogger.debug( - `Using provider: ${provider}, model: ${model}, temperature: ${temperature}, max response length: ${max_response_length}` - ); - - switch (provider) { - // OPENAI & LLAMACLOUD shared same structure. - case ModelProviderName.OPENAI: - case ModelProviderName.ALI_BAILIAN: - case ModelProviderName.VOLENGINE: - case ModelProviderName.LLAMACLOUD: - case ModelProviderName.NANOGPT: - case ModelProviderName.HYPERBOLIC: - case ModelProviderName.TOGETHER: - case ModelProviderName.AKASH_CHAT_API: { - elizaLogger.debug("Initializing OpenAI model."); - const openai = createOpenAI({ - apiKey, - baseURL: endpoint, - fetch: runtime.fetch, - }); - - const { text: openaiResponse } = await aiGenerateText({ - model: openai.languageModel(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? 
- undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = openaiResponse; - elizaLogger.debug("Received response from OpenAI model."); - break; - } - - case ModelProviderName.ETERNALAI: { - elizaLogger.debug("Initializing EternalAI model."); - const openai = createOpenAI({ - apiKey, - baseURL: endpoint, - fetch: async (url: string, options: any) => { - const fetching = await runtime.fetch(url, options); - if ( - parseBooleanFromText( - runtime.getSetting("ETERNALAI_LOG") - ) - ) { - elizaLogger.info( - "Request data: ", - JSON.stringify(options, null, 2) - ); - const clonedResponse = fetching.clone(); - clonedResponse.json().then((data) => { - elizaLogger.info( - "Response data: ", - JSON.stringify(data, null, 2) - ); - }); - } - return fetching; - }, - }); - - const { text: openaiResponse } = await aiGenerateText({ - model: openai.languageModel(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - }); - - response = openaiResponse; - elizaLogger.debug("Received response from EternalAI model."); - break; - } - - case ModelProviderName.GOOGLE: { - const google = createGoogleGenerativeAI({ - apiKey, - fetch: runtime.fetch, - }); - - const { text: googleResponse } = await aiGenerateText({ - model: google(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? 
- undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = googleResponse; - elizaLogger.debug("Received response from Google model."); - break; - } - - case ModelProviderName.ANTHROPIC: { - elizaLogger.debug("Initializing Anthropic model."); - - const anthropic = createAnthropic({ - apiKey, - fetch: runtime.fetch, - }); - - const { text: anthropicResponse } = await aiGenerateText({ - model: anthropic.languageModel(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = anthropicResponse; - elizaLogger.debug("Received response from Anthropic model."); - break; - } - - case ModelProviderName.CLAUDE_VERTEX: { - elizaLogger.debug("Initializing Claude Vertex model."); - - const anthropic = createAnthropic({ - apiKey, - fetch: runtime.fetch, - }); - - const { text: anthropicResponse } = await aiGenerateText({ - model: anthropic.languageModel(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = anthropicResponse; - elizaLogger.debug( - "Received response from Claude Vertex model." 
- ); - break; - } - - case ModelProviderName.GROK: { - elizaLogger.debug("Initializing Grok model."); - const grok = createOpenAI({ - apiKey, - baseURL: endpoint, - fetch: runtime.fetch, - }); - - const { text: grokResponse } = await aiGenerateText({ - model: grok.languageModel(model, { - parallelToolCalls: false, - }), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = grokResponse; - elizaLogger.debug("Received response from Grok model."); - break; - } - - case ModelProviderName.GROQ: { - const groq = createGroq({ apiKey, fetch: runtime.fetch }); - - const { text: groqResponse } = await aiGenerateText({ - model: groq.languageModel(model), - prompt: context, - temperature: temperature, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = groqResponse; - break; - } - - case ModelProviderName.LLAMALOCAL: { - elizaLogger.debug( - "Using local Llama model for text completion." 
- ); - const textGenerationService = - runtime.getService( - ServiceType.TEXT_GENERATION - ); - - if (!textGenerationService) { - throw new Error("Text generation service not found"); - } - - response = await textGenerationService.queueTextCompletion( - context, - temperature, - _stop, - frequency_penalty, - presence_penalty, - max_response_length - ); - elizaLogger.debug("Received response from local Llama model."); - break; - } - - case ModelProviderName.REDPILL: { - elizaLogger.debug("Initializing RedPill model."); - const serverUrl = models[provider].endpoint; - const openai = createOpenAI({ - apiKey, - baseURL: serverUrl, - fetch: runtime.fetch, - }); - - const { text: redpillResponse } = await aiGenerateText({ - model: openai.languageModel(model), - prompt: context, - temperature: temperature, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = redpillResponse; - elizaLogger.debug("Received response from redpill model."); - break; - } - - case ModelProviderName.OPENROUTER: { - elizaLogger.debug("Initializing OpenRouter model."); - const serverUrl = models[provider].endpoint; - const openrouter = createOpenAI({ - apiKey, - baseURL: serverUrl, - fetch: runtime.fetch, - }); - - const { text: openrouterResponse } = await aiGenerateText({ - model: openrouter.languageModel(model), - prompt: context, - temperature: temperature, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? 
- undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = openrouterResponse; - elizaLogger.debug("Received response from OpenRouter model."); - break; - } - - case ModelProviderName.OLLAMA: - { - elizaLogger.debug("Initializing Ollama model."); - - const ollamaProvider = createOllama({ - baseURL: models[provider].endpoint + "/api", - fetch: runtime.fetch, - }); - const ollama = ollamaProvider(model); - - elizaLogger.debug("****** MODEL\n", model); - - const { text: ollamaResponse } = await aiGenerateText({ - model: ollama, - prompt: context, - tools: tools, - onStepFinish: onStepFinish, - temperature: temperature, - maxSteps: maxSteps, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = ollamaResponse; - } - elizaLogger.debug("Received response from Ollama model."); - break; - - case ModelProviderName.HEURIST: { - elizaLogger.debug("Initializing Heurist model."); - const heurist = createOpenAI({ - apiKey: apiKey, - baseURL: endpoint, - fetch: runtime.fetch, - }); - - const { text: heuristResponse } = await aiGenerateText({ - model: heurist.languageModel(model), - prompt: context, - system: - customSystemPrompt ?? - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? 
- undefined, - tools: tools, - onStepFinish: onStepFinish, - temperature: temperature, - maxTokens: max_response_length, - maxSteps: maxSteps, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = heuristResponse; - elizaLogger.debug("Received response from Heurist model."); - break; - } - case ModelProviderName.GAIANET: { - elizaLogger.debug("Initializing GAIANET model."); - - var baseURL = models[provider].endpoint; - if (!baseURL) { - switch (modelClass) { - case ModelClass.SMALL: - baseURL = - settings.SMALL_GAIANET_SERVER_URL || - "https://llama3b.gaia.domains/v1"; - break; - case ModelClass.MEDIUM: - baseURL = - settings.MEDIUM_GAIANET_SERVER_URL || - "https://llama8b.gaia.domains/v1"; - break; - case ModelClass.LARGE: - baseURL = - settings.LARGE_GAIANET_SERVER_URL || - "https://qwen72b.gaia.domains/v1"; - break; - } - } - - elizaLogger.debug("Using GAIANET model with baseURL:", baseURL); - - const openai = createOpenAI({ - apiKey, - baseURL: endpoint, - fetch: runtime.fetch, - }); - - const { text: openaiResponse } = await aiGenerateText({ - model: openai.languageModel(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? 
- undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = openaiResponse; - elizaLogger.debug("Received response from GAIANET model."); - break; - } - - case ModelProviderName.GALADRIEL: { - elizaLogger.debug("Initializing Galadriel model."); - const galadriel = createOpenAI({ - apiKey: apiKey, - baseURL: endpoint, - fetch: runtime.fetch, - }); - - const { text: galadrielResponse } = await aiGenerateText({ - model: galadriel.languageModel(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = galadrielResponse; - elizaLogger.debug("Received response from Galadriel model."); - break; - } - - case ModelProviderName.VENICE: { - elizaLogger.debug("Initializing Venice model."); - const venice = createOpenAI({ - apiKey: apiKey, - baseURL: endpoint, - }); - - const { text: veniceResponse } = await aiGenerateText({ - model: venice.languageModel(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? 
- undefined, - tools: tools, - onStepFinish: onStepFinish, - temperature: temperature, - maxSteps: maxSteps, - maxTokens: max_response_length, - }); - - response = veniceResponse; - elizaLogger.debug("Received response from Venice model."); - break; - } - - case ModelProviderName.INFERA: { - elizaLogger.debug("Initializing Infera model."); - const apiKey = settings.INFERA_API_KEY || runtime.token; - - const infera = createOpenAI({ - apiKey, - baseURL: endpoint, - headers: { - api_key: apiKey, - "Content-Type": "application/json", - }, - }); - - const { text: inferaResponse } = await aiGenerateText({ - model: infera.languageModel(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - }); - - response = inferaResponse; - elizaLogger.debug("Received response from Infera model."); - break; - } - - default: { - const errorMessage = `Unsupported provider: ${provider}`; - elizaLogger.error(errorMessage); - throw new Error(errorMessage); - } - } - - return response; - } catch (error) { - elizaLogger.error("Error in generateText:", error); - throw error; - } -} - -/** - * Sends a message to the model to determine if it should respond to the given context. 
- * @param opts - The options for the generateText request - * @param opts.context The context to evaluate for response - * @param opts.stop A list of strings to stop the generateText at - * @param opts.model The model to use for generateText - * @param opts.frequency_penalty The frequency penalty to apply (0.0 to 2.0) - * @param opts.presence_penalty The presence penalty to apply (0.0 to 2.0) - * @param opts.temperature The temperature to control randomness (0.0 to 2.0) - * @param opts.serverUrl The URL of the API server - * @param opts.max_context_length Maximum allowed context length in tokens - * @param opts.max_response_length Maximum allowed response length in tokens - * @returns Promise resolving to "RESPOND", "IGNORE", "STOP" or null - */ -export async function generateShouldRespond({ - runtime, - context, - modelClass, -}: { - runtime: IAgentRuntime; - context: string; - modelClass: string; -}): Promise<"RESPOND" | "IGNORE" | "STOP" | null> { - let retryDelay = 1000; - while (true) { - try { - elizaLogger.debug( - "Attempting to generate text with context:", - context - ); - const response = await generateText({ - runtime, - context, - modelClass, - }); - - elizaLogger.debug("Received response from generateText:", response); - const parsedResponse = parseShouldRespondFromText(response.trim()); - if (parsedResponse) { - elizaLogger.debug("Parsed response:", parsedResponse); - return parsedResponse; - } else { - elizaLogger.debug("generateShouldRespond no response"); - } - } catch (error) { - elizaLogger.error("Error in generateShouldRespond:", error); - if ( - error instanceof TypeError && - error.message.includes("queueTextCompletion") - ) { - elizaLogger.error( - "TypeError: Cannot read properties of null (reading 'queueTextCompletion')" - ); - } - } - - elizaLogger.log(`Retrying in ${retryDelay}ms...`); - await new Promise((resolve) => setTimeout(resolve, retryDelay)); - retryDelay *= 2; - } -} - -/** - * Splits content into chunks of specified size with 
optional overlapping bleed sections - * @param content - The text content to split into chunks - * @param chunkSize - The maximum size of each chunk in tokens - * @param bleed - Number of characters to overlap between chunks (default: 100) - * @returns Promise resolving to array of text chunks with bleed sections - */ -export async function splitChunks( - content: string, - chunkSize: number = 512, - bleed: number = 20 -): Promise { - const textSplitter = new RecursiveCharacterTextSplitter({ - chunkSize: Number(chunkSize), - chunkOverlap: Number(bleed), - }); - - return textSplitter.splitText(content); -} - -/** - * Sends a message to the model and parses the response as a boolean value - * @param opts - The options for the generateText request - * @param opts.context The context to evaluate for the boolean response - * @param opts.stop A list of strings to stop the generateText at - * @param opts.model The model to use for generateText - * @param opts.frequency_penalty The frequency penalty to apply (0.0 to 2.0) - * @param opts.presence_penalty The presence penalty to apply (0.0 to 2.0) - * @param opts.temperature The temperature to control randomness (0.0 to 2.0) - * @param opts.serverUrl The URL of the API server - * @param opts.token The API token for authentication - * @param opts.max_context_length Maximum allowed context length in tokens - * @param opts.max_response_length Maximum allowed response length in tokens - * @returns Promise resolving to a boolean value parsed from the model's response - */ -export async function generateTrueOrFalse({ - runtime, - context = "", - modelClass, -}: { - runtime: IAgentRuntime; - context: string; - modelClass: string; -}): Promise { - let retryDelay = 1000; - - const stop = Array.from( - new Set([ - ...(models[runtime.modelProvider].settings.stop || []), - ["\n"], - ]) - ) as string[]; - - while (true) { - try { - const response = await generateText({ - stop, - runtime, - context, - modelClass, - }); - - const 
parsedResponse = parseBooleanFromText(response.trim()); - if (parsedResponse !== null) { - return parsedResponse; - } - } catch (error) { - elizaLogger.error("Error in generateTrueOrFalse:", error); - } - - await new Promise((resolve) => setTimeout(resolve, retryDelay)); - retryDelay *= 2; - } -} - -/** - * Send a message to the model and parse the response as a string array - * @param opts - The options for the generateText request - * @param opts.context The context/prompt to send to the model - * @param opts.stop Array of strings that will stop the model's generation if encountered - * @param opts.model The language model to use - * @param opts.frequency_penalty The frequency penalty to apply (0.0 to 2.0) - * @param opts.presence_penalty The presence penalty to apply (0.0 to 2.0) - * @param opts.temperature The temperature to control randomness (0.0 to 2.0) - * @param opts.serverUrl The URL of the API server - * @param opts.token The API token for authentication - * @param opts.max_context_length Maximum allowed context length in tokens - * @param opts.max_response_length Maximum allowed response length in tokens - * @returns Promise resolving to an array of strings parsed from the model's response - */ -export async function generateTextArray({ - runtime, - context, - modelClass, -}: { - runtime: IAgentRuntime; - context: string; - modelClass: string; -}): Promise { - if (!context) { - elizaLogger.error("generateTextArray context is empty"); - return []; - } - let retryDelay = 1000; - - while (true) { - try { - const response = await generateText({ - runtime, - context, - modelClass, - }); - - const parsedResponse = parseJsonArrayFromText(response); - if (parsedResponse) { - return parsedResponse; - } - } catch (error) { - elizaLogger.error("Error in generateTextArray:", error); - } - - await new Promise((resolve) => setTimeout(resolve, retryDelay)); - retryDelay *= 2; - } -} - -export async function generateObjectDeprecated({ - runtime, - context, - 
modelClass, -}: { - runtime: IAgentRuntime; - context: string; - modelClass: string; -}): Promise { - if (!context) { - elizaLogger.error("generateObjectDeprecated context is empty"); - return null; - } - let retryDelay = 1000; - - while (true) { - try { - // this is slightly different than generateObjectArray, in that we parse object, not object array - const response = await generateText({ - runtime, - context, - modelClass, - }); - const parsedResponse = parseJSONObjectFromText(response); - if (parsedResponse) { - return parsedResponse; - } - } catch (error) { - elizaLogger.error("Error in generateObject:", error); - } - - await new Promise((resolve) => setTimeout(resolve, retryDelay)); - retryDelay *= 2; - } -} - -export async function generateObjectArray({ - runtime, - context, - modelClass, -}: { - runtime: IAgentRuntime; - context: string; - modelClass: string; -}): Promise { - if (!context) { - elizaLogger.error("generateObjectArray context is empty"); - return []; - } - let retryDelay = 1000; - - while (true) { - try { - const response = await generateText({ - runtime, - context, - modelClass, - }); - - const parsedResponse = parseJsonArrayFromText(response); - if (parsedResponse) { - return parsedResponse; - } - } catch (error) { - elizaLogger.error("Error in generateTextArray:", error); - } - - await new Promise((resolve) => setTimeout(resolve, retryDelay)); - retryDelay *= 2; - } -} - -/** - * Send a message to the model for generateText. - * @param opts - The options for the generateText request. - * @param opts.context The context of the message to be completed. - * @param opts.stop A list of strings to stop the generateText at. - * @param opts.model The model to use for generateText. - * @param opts.frequency_penalty The frequency penalty to apply to the generateText. - * @param opts.presence_penalty The presence penalty to apply to the generateText. - * @param opts.temperature The temperature to apply to the generateText. 
- * @param opts.max_context_length The maximum length of the context to apply to the generateText. - * @returns The completed message. - */ -export async function generateMessageResponse({ - runtime, - context, - modelClass, -}: { - runtime: IAgentRuntime; - context: string; - modelClass: string; -}): Promise { - const provider = runtime.modelProvider; - const max_context_length = models[provider].settings.maxInputTokens; - - context = await trimTokens(context, max_context_length, runtime); - let retryLength = 1000; // exponential backoff - while (true) { - try { - elizaLogger.log("Generating message response.."); - - const response = await generateText({ - runtime, - context, - modelClass, - }); - - // try parsing the response as JSON, if null then try again - const parsedContent = parseJSONObjectFromText(response) as Content; - if (!parsedContent) { - elizaLogger.debug("parsedContent is null, retrying"); - continue; - } - - return parsedContent; - } catch (error) { - elizaLogger.error("ERROR:", error); - // wait for 2 seconds - retryLength *= 2; - await new Promise((resolve) => setTimeout(resolve, retryLength)); - elizaLogger.debug("Retrying..."); - } - } -} - -export const generateImage = async ( - data: { - prompt: string; - width: number; - height: number; - count?: number; - negativePrompt?: string; - numIterations?: number; - guidanceScale?: number; - seed?: number; - modelId?: string; - jobId?: string; - stylePreset?: string; - hideWatermark?: boolean; - }, - runtime: IAgentRuntime -): Promise<{ - success: boolean; - data?: string[]; - error?: any; -}> => { - const model = getModel(runtime.imageModelProvider, ModelClass.IMAGE); - const modelSettings = models[runtime.imageModelProvider].imageSettings; - - elizaLogger.info("Generating image with options:", { - imageModelProvider: model, - }); - - const apiKey = - runtime.imageModelProvider === runtime.modelProvider - ? 
runtime.token - : (() => { - // First try to match the specific provider - switch (runtime.imageModelProvider) { - case ModelProviderName.HEURIST: - return runtime.getSetting("HEURIST_API_KEY"); - case ModelProviderName.TOGETHER: - return runtime.getSetting("TOGETHER_API_KEY"); - case ModelProviderName.FAL: - return runtime.getSetting("FAL_API_KEY"); - case ModelProviderName.OPENAI: - return runtime.getSetting("OPENAI_API_KEY"); - case ModelProviderName.VENICE: - return runtime.getSetting("VENICE_API_KEY"); - case ModelProviderName.LIVEPEER: - return runtime.getSetting("LIVEPEER_GATEWAY_URL"); - default: - // If no specific match, try the fallback chain - return ( - runtime.getSetting("HEURIST_API_KEY") ?? - runtime.getSetting("TOGETHER_API_KEY") ?? - runtime.getSetting("FAL_API_KEY") ?? - runtime.getSetting("OPENAI_API_KEY") ?? - runtime.getSetting("VENICE_API_KEY") ?? - runtime.getSetting("LIVEPEER_GATEWAY_URL") - ); - } - })(); - try { - if (runtime.imageModelProvider === ModelProviderName.HEURIST) { - const response = await fetch( - "http://sequencer.heurist.xyz/submit_job", - { - method: "POST", - headers: { - Authorization: `Bearer ${apiKey}`, - "Content-Type": "application/json", - }, - body: JSON.stringify({ - job_id: data.jobId || crypto.randomUUID(), - model_input: { - SD: { - prompt: data.prompt, - neg_prompt: data.negativePrompt, - num_iterations: data.numIterations || 20, - width: data.width || 512, - height: data.height || 512, - guidance_scale: data.guidanceScale || 3, - seed: data.seed || -1, - }, - }, - model_id: data.modelId || "FLUX.1-dev", - deadline: 60, - priority: 1, - }), - } - ); - - if (!response.ok) { - throw new Error( - `Heurist image generation failed: ${response.statusText}` - ); - } - - const imageURL = await response.json(); - return { success: true, data: [imageURL] }; - } else if ( - runtime.imageModelProvider === ModelProviderName.TOGETHER || - // for backwards compat - runtime.imageModelProvider === ModelProviderName.LLAMACLOUD 
- ) { - const together = new Together({ apiKey: apiKey as string }); - const response = await together.images.create({ - model: "black-forest-labs/FLUX.1-schnell", - prompt: data.prompt, - width: data.width, - height: data.height, - steps: modelSettings?.steps ?? 4, - n: data.count, - }); - - // Add type assertion to handle the response properly - const togetherResponse = - response as unknown as TogetherAIImageResponse; - - if ( - !togetherResponse.data || - !Array.isArray(togetherResponse.data) - ) { - throw new Error("Invalid response format from Together AI"); - } - - // Rest of the code remains the same... - const base64s = await Promise.all( - togetherResponse.data.map(async (image) => { - if (!image.url) { - elizaLogger.error("Missing URL in image data:", image); - throw new Error("Missing URL in Together AI response"); - } - - // Fetch the image from the URL - const imageResponse = await fetch(image.url); - if (!imageResponse.ok) { - throw new Error( - `Failed to fetch image: ${imageResponse.statusText}` - ); - } - - // Convert to blob and then to base64 - const blob = await imageResponse.blob(); - const arrayBuffer = await blob.arrayBuffer(); - const base64 = Buffer.from(arrayBuffer).toString("base64"); - - // Return with proper MIME type - return `data:image/jpeg;base64,${base64}`; - }) - ); - - if (base64s.length === 0) { - throw new Error("No images generated by Together AI"); - } - - elizaLogger.debug(`Generated ${base64s.length} images`); - return { success: true, data: base64s }; - } else if (runtime.imageModelProvider === ModelProviderName.FAL) { - fal.config({ - credentials: apiKey as string, - }); - - // Prepare the input parameters according to their schema - const input = { - prompt: data.prompt, - image_size: "square" as const, - num_inference_steps: modelSettings?.steps ?? 
50, - guidance_scale: data.guidanceScale || 3.5, - num_images: data.count, - enable_safety_checker: - runtime.getSetting("FAL_AI_ENABLE_SAFETY_CHECKER") === - "true", - safety_tolerance: Number( - runtime.getSetting("FAL_AI_SAFETY_TOLERANCE") || "2" - ), - output_format: "png" as const, - seed: data.seed ?? 6252023, - ...(runtime.getSetting("FAL_AI_LORA_PATH") - ? { - loras: [ - { - path: runtime.getSetting("FAL_AI_LORA_PATH"), - scale: 1, - }, - ], - } - : {}), - }; - - // Subscribe to the model - const result = await fal.subscribe(model, { - input, - logs: true, - onQueueUpdate: (update) => { - if (update.status === "IN_PROGRESS") { - elizaLogger.info(update.logs.map((log) => log.message)); - } - }, - }); - - // Convert the returned image URLs to base64 to match existing functionality - const base64Promises = result.data.images.map(async (image) => { - const response = await fetch(image.url); - const blob = await response.blob(); - const buffer = await blob.arrayBuffer(); - const base64 = Buffer.from(buffer).toString("base64"); - return `data:${image.content_type};base64,${base64}`; - }); - - const base64s = await Promise.all(base64Promises); - return { success: true, data: base64s }; - } else if (runtime.imageModelProvider === ModelProviderName.VENICE) { - const response = await fetch( - "https://api.venice.ai/api/v1/image/generate", - { - method: "POST", - headers: { - Authorization: `Bearer ${apiKey}`, - "Content-Type": "application/json", - }, - body: JSON.stringify({ - model: data.modelId || "fluently-xl", - prompt: data.prompt, - negative_prompt: data.negativePrompt, - width: data.width, - height: data.height, - steps: data.numIterations, - seed: data.seed, - style_preset: data.stylePreset, - hide_watermark: data.hideWatermark, - }), - } - ); - - const result = await response.json(); - - if (!result.images || !Array.isArray(result.images)) { - throw new Error("Invalid response format from Venice AI"); - } - - const base64s = result.images.map((base64String) 
=> { - if (!base64String) { - throw new Error( - "Empty base64 string in Venice AI response" - ); - } - return `data:image/png;base64,${base64String}`; - }); - - return { success: true, data: base64s }; - } else if (runtime.imageModelProvider === ModelProviderName.LIVEPEER) { - if (!apiKey) { - throw new Error("Livepeer Gateway is not defined"); - } - try { - const baseUrl = new URL(apiKey); - if (!baseUrl.protocol.startsWith("http")) { - throw new Error("Invalid Livepeer Gateway URL protocol"); - } - const response = await fetch( - `${baseUrl.toString()}text-to-image`, - { - method: "POST", - headers: { - "Content-Type": "application/json", - }, - body: JSON.stringify({ - model_id: - data.modelId || "ByteDance/SDXL-Lightning", - prompt: data.prompt, - width: data.width || 1024, - height: data.height || 1024, - }), - } - ); - const result = await response.json(); - if (!result.images?.length) { - throw new Error("No images generated"); - } - const base64Images = await Promise.all( - result.images.map(async (image) => { - console.log("imageUrl console log", image.url); - let imageUrl; - if (image.url.includes("http")) { - imageUrl = image.url; - } else { - imageUrl = `${apiKey}${image.url}`; - } - const imageResponse = await fetch(imageUrl); - if (!imageResponse.ok) { - throw new Error( - `Failed to fetch image: ${imageResponse.statusText}` - ); - } - const blob = await imageResponse.blob(); - const arrayBuffer = await blob.arrayBuffer(); - const base64 = - Buffer.from(arrayBuffer).toString("base64"); - return `data:image/jpeg;base64,${base64}`; - }) - ); - return { - success: true, - data: base64Images, - }; - } catch (error) { - console.error(error); - return { success: false, error: error }; - } - } else { - let targetSize = `${data.width}x${data.height}`; - if ( - targetSize !== "1024x1024" && - targetSize !== "1792x1024" && - targetSize !== "1024x1792" - ) { - targetSize = "1024x1024"; - } - const openaiApiKey = runtime.getSetting("OPENAI_API_KEY") as string; - 
if (!openaiApiKey) { - throw new Error("OPENAI_API_KEY is not set"); - } - const openai = new OpenAI({ - apiKey: openaiApiKey as string, - }); - const response = await openai.images.generate({ - model, - prompt: data.prompt, - size: targetSize as "1024x1024" | "1792x1024" | "1024x1792", - n: data.count, - response_format: "b64_json", - }); - const base64s = response.data.map( - (image) => `data:image/png;base64,${image.b64_json}` - ); - return { success: true, data: base64s }; - } - } catch (error) { - console.error(error); - return { success: false, error: error }; - } -}; - -export const generateCaption = async ( - data: { imageUrl: string }, - runtime: IAgentRuntime -): Promise<{ - title: string; - description: string; -}> => { - const { imageUrl } = data; - const imageDescriptionService = - runtime.getService( - ServiceType.IMAGE_DESCRIPTION - ); - - if (!imageDescriptionService) { - throw new Error("Image description service not found"); - } - - const resp = await imageDescriptionService.describeImage(imageUrl); - return { - title: resp.title.trim(), - description: resp.description.trim(), - }; -}; - -export const generateWebSearch = async ( - query: string, - runtime: IAgentRuntime -): Promise => { - try { - const apiKey = runtime.getSetting("TAVILY_API_KEY") as string; - if (!apiKey) { - throw new Error("TAVILY_API_KEY is not set"); - } - const tvly = tavily({ apiKey }); - const response = await tvly.search(query, { - includeAnswer: true, - maxResults: 3, // 5 (default) - topic: "general", // "general"(default) "news" - searchDepth: "basic", // "basic"(default) "advanced" - includeImages: false, // false (default) true - }); - return response; - } catch (error) { - elizaLogger.error("Error:", error); - } -}; -/** - * Configuration options for generating objects with a model. 
- */ -export interface GenerationOptions { - runtime: IAgentRuntime; - context: string; - modelClass: ModelClass; - schema?: ZodSchema; - schemaName?: string; - schemaDescription?: string; - stop?: string[]; - mode?: "auto" | "json" | "tool"; - experimental_providerMetadata?: Record; -} - -/** - * Base settings for model generation. - */ -interface ModelSettings { - prompt: string; - temperature: number; - maxTokens: number; - frequencyPenalty: number; - presencePenalty: number; - stop?: string[]; - experimental_telemetry?: TelemetrySettings; -} - -/** - * Generates structured objects from a prompt using specified AI models and configuration options. - * - * @param {GenerationOptions} options - Configuration options for generating objects. - * @returns {Promise} - A promise that resolves to an array of generated objects. - * @throws {Error} - Throws an error if the provider is unsupported or if generation fails. - */ -export const generateObject = async ({ - runtime, - context, - modelClass, - schema, - schemaName, - schemaDescription, - stop, - mode = "json", -}: GenerationOptions): Promise> => { - if (!context) { - const errorMessage = "generateObject context is empty"; - console.error(errorMessage); - throw new Error(errorMessage); - } - - const provider = runtime.modelProvider; - const model = models[provider].model[modelClass]; - const temperature = models[provider].settings.temperature; - const frequency_penalty = models[provider].settings.frequency_penalty; - const presence_penalty = models[provider].settings.presence_penalty; - const max_context_length = models[provider].settings.maxInputTokens; - const max_response_length = models[provider].settings.maxOutputTokens; - const experimental_telemetry = - models[provider].settings.experimental_telemetry; - const apiKey = runtime.token; - - try { - context = await trimTokens(context, max_context_length, runtime); - - const modelOptions: ModelSettings = { - prompt: context, - temperature, - maxTokens: 
max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - stop: stop || models[provider].settings.stop, - experimental_telemetry: experimental_telemetry, - }; - - const response = await handleProvider({ - provider, - model, - apiKey, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, - runtime, - context, - modelClass, - }); - - return response; - } catch (error) { - console.error("Error in generateObject:", error); - throw error; - } -}; - -/** - * Interface for provider-specific generation options. - */ -interface ProviderOptions { - runtime: IAgentRuntime; - provider: ModelProviderName; - model: any; - apiKey: string; - schema?: ZodSchema; - schemaName?: string; - schemaDescription?: string; - mode?: "auto" | "json" | "tool"; - experimental_providerMetadata?: Record; - modelOptions: ModelSettings; - modelClass: string; - context: string; -} - -/** - * Handles AI generation based on the specified provider. - * - * @param {ProviderOptions} options - Configuration options specific to the provider. - * @returns {Promise} - A promise that resolves to an array of generated objects. 
- */ -export async function handleProvider( - options: ProviderOptions -): Promise> { - const { provider, runtime, context, modelClass } = options; - switch (provider) { - case ModelProviderName.OPENAI: - case ModelProviderName.ETERNALAI: - case ModelProviderName.ALI_BAILIAN: - case ModelProviderName.VOLENGINE: - case ModelProviderName.LLAMACLOUD: - case ModelProviderName.TOGETHER: - case ModelProviderName.NANOGPT: - case ModelProviderName.INFERA: - case ModelProviderName.AKASH_CHAT_API: - return await handleOpenAI(options); - case ModelProviderName.ANTHROPIC: - case ModelProviderName.CLAUDE_VERTEX: - return await handleAnthropic(options); - case ModelProviderName.GROK: - return await handleGrok(options); - case ModelProviderName.GROQ: - return await handleGroq(options); - case ModelProviderName.LLAMALOCAL: - return await generateObjectDeprecated({ - runtime, - context, - modelClass, - }); - case ModelProviderName.GOOGLE: - return await handleGoogle(options); - case ModelProviderName.REDPILL: - return await handleRedPill(options); - case ModelProviderName.OPENROUTER: - return await handleOpenRouter(options); - case ModelProviderName.OLLAMA: - return await handleOllama(options); - default: { - const errorMessage = `Unsupported provider: ${provider}`; - elizaLogger.error(errorMessage); - throw new Error(errorMessage); - } - } -} -/** - * Handles object generation for OpenAI. - * - * @param {ProviderOptions} options - Options specific to OpenAI. - * @returns {Promise>} - A promise that resolves to generated objects. 
- */ -async function handleOpenAI({ - model, - apiKey, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, -}: ProviderOptions): Promise> { - const baseURL = models.openai.endpoint || undefined; - const openai = createOpenAI({ apiKey, baseURL }); - return await aiGenerateObject({ - model: openai.languageModel(model), - schema, - schemaName, - schemaDescription, - mode, - ...modelOptions, - }); -} - -/** - * Handles object generation for Anthropic models. - * - * @param {ProviderOptions} options - Options specific to Anthropic. - * @returns {Promise>} - A promise that resolves to generated objects. - */ -async function handleAnthropic({ - model, - apiKey, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, -}: ProviderOptions): Promise> { - const anthropic = createAnthropic({ apiKey }); - return await aiGenerateObject({ - model: anthropic.languageModel(model), - schema, - schemaName, - schemaDescription, - mode, - ...modelOptions, - }); -} - -/** - * Handles object generation for Grok models. - * - * @param {ProviderOptions} options - Options specific to Grok. - * @returns {Promise>} - A promise that resolves to generated objects. - */ -async function handleGrok({ - model, - apiKey, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, -}: ProviderOptions): Promise> { - const grok = createOpenAI({ apiKey, baseURL: models.grok.endpoint }); - return await aiGenerateObject({ - model: grok.languageModel(model, { parallelToolCalls: false }), - schema, - schemaName, - schemaDescription, - mode, - ...modelOptions, - }); -} - -/** - * Handles object generation for Groq models. - * - * @param {ProviderOptions} options - Options specific to Groq. - * @returns {Promise>} - A promise that resolves to generated objects. 
- */ -async function handleGroq({ - model, - apiKey, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, -}: ProviderOptions): Promise> { - const groq = createGroq({ apiKey }); - return await aiGenerateObject({ - model: groq.languageModel(model), - schema, - schemaName, - schemaDescription, - mode, - ...modelOptions, - }); -} - -/** - * Handles object generation for Google models. - * - * @param {ProviderOptions} options - Options specific to Google. - * @returns {Promise>} - A promise that resolves to generated objects. - */ -async function handleGoogle({ - model, - apiKey: _apiKey, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, -}: ProviderOptions): Promise> { - const google = createGoogleGenerativeAI(); - return await aiGenerateObject({ - model: google(model), - schema, - schemaName, - schemaDescription, - mode, - ...modelOptions, - }); -} - -/** - * Handles object generation for Redpill models. - * - * @param {ProviderOptions} options - Options specific to Redpill. - * @returns {Promise>} - A promise that resolves to generated objects. - */ -async function handleRedPill({ - model, - apiKey, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, -}: ProviderOptions): Promise> { - const redPill = createOpenAI({ apiKey, baseURL: models.redpill.endpoint }); - return await aiGenerateObject({ - model: redPill.languageModel(model), - schema, - schemaName, - schemaDescription, - mode, - ...modelOptions, - }); -} - -/** - * Handles object generation for OpenRouter models. - * - * @param {ProviderOptions} options - Options specific to OpenRouter. - * @returns {Promise>} - A promise that resolves to generated objects. 
- */ -async function handleOpenRouter({ - model, - apiKey, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, -}: ProviderOptions): Promise> { - const openRouter = createOpenAI({ - apiKey, - baseURL: models.openrouter.endpoint, - }); - return await aiGenerateObject({ - model: openRouter.languageModel(model), - schema, - schemaName, - schemaDescription, - mode, - ...modelOptions, - }); -} - -/** - * Handles object generation for Ollama models. - * - * @param {ProviderOptions} options - Options specific to Ollama. - * @returns {Promise>} - A promise that resolves to generated objects. - */ -async function handleOllama({ - model, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, - provider, -}: ProviderOptions): Promise> { - const ollamaProvider = createOllama({ - baseURL: models[provider].endpoint + "/api", - }); - const ollama = ollamaProvider(model); - return await aiGenerateObject({ - model: ollama, - schema, - schemaName, - schemaDescription, - mode, - ...modelOptions, - }); -} - -// Add type definition for Together AI response -interface TogetherAIImageResponse { - data: Array<{ - url: string; - content_type?: string; - image_type?: string; - }>; -} - -export async function generateTweetActions({ - runtime, - context, - modelClass, -}: { - runtime: IAgentRuntime; - context: string; - modelClass: string; -}): Promise { - let retryDelay = 1000; - while (true) { - try { - const response = await generateText({ - runtime, - context, - modelClass, - }); - console.debug( - "Received response from generateText for tweet actions:", - response - ); - const { actions } = parseActionResponseFromText(response.trim()); - if (actions) { - console.debug("Parsed tweet actions:", actions); - return actions; - } else { - elizaLogger.debug("generateTweetActions no valid response"); - } - } catch (error) { - elizaLogger.error("Error in generateTweetActions:", error); - if ( - error instanceof TypeError && - 
error.message.includes("queueTextCompletion") - ) { - elizaLogger.error( - "TypeError: Cannot read properties of null (reading 'queueTextCompletion')" - ); - } - } - elizaLogger.log(`Retrying in ${retryDelay}ms...`); - await new Promise((resolve) => setTimeout(resolve, retryDelay)); - retryDelay *= 2; - } -} diff --git a/packages/core/package.json b/packages/core/package.json index f0b62d364b..daa124884b 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -64,6 +64,7 @@ "typescript": "5.6.3" }, "dependencies": { + "@ai-sdk/amazon-bedrock": "^1.0.8", "@ai-sdk/anthropic": "0.0.56", "@ai-sdk/google": "0.0.55", "@ai-sdk/google-vertex": "0.0.43", diff --git a/packages/core/src/embedding.ts b/packages/core/src/embedding.ts index 73cc657f00..9d1c6ba2fc 100644 --- a/packages/core/src/embedding.ts +++ b/packages/core/src/embedding.ts @@ -2,7 +2,7 @@ import { getEmbeddingModelSettings, getEndpoint } from "./models.ts"; import { IAgentRuntime, ModelProviderName } from "./types.ts"; import settings from "./settings.ts"; import elizaLogger from "./logger.ts"; -import LocalEmbeddingModelManager from "./localembeddingManager.ts"; +//import LocalEmbeddingModelManager from "./localembeddingManager.ts"; interface EmbeddingOptions { model: string; @@ -229,17 +229,17 @@ export async function embed(runtime: IAgentRuntime, input: string) { }); } - // BGE - try local first if in Node - if (isNode) { - try { - return await getLocalEmbedding(input); - } catch (error) { - elizaLogger.warn( - "Local embedding failed, falling back to remote", - error - ); - } - } + // // BGE - try local first if in Node + // if (isNode) { + // try { + // return await getLocalEmbedding(input); + // } catch (error) { + // elizaLogger.warn( + // "Local embedding failed, falling back to remote", + // error + // ); + // } + // } // Fallback to remote override return await getRemoteEmbedding(input, { @@ -251,17 +251,17 @@ export async function embed(runtime: IAgentRuntime, input: string) { 
dimensions: config.dimensions, }); - async function getLocalEmbedding(input: string): Promise { - elizaLogger.debug("DEBUG - Inside getLocalEmbedding function"); - - try { - const embeddingManager = LocalEmbeddingModelManager.getInstance(); - return await embeddingManager.generateEmbedding(input); - } catch (error) { - elizaLogger.error("Local embedding failed:", error); - throw error; - } - } + // async function getLocalEmbedding(input: string): Promise { + // elizaLogger.debug("DEBUG - Inside getLocalEmbedding function"); + + // try { + // const embeddingManager = LocalEmbeddingModelManager.getInstance(); + // return await embeddingManager.generateEmbedding(input); + // } catch (error) { + // elizaLogger.error("Local embedding failed:", error); + // throw error; + // } + // } async function retrieveCachedEmbedding( runtime: IAgentRuntime, diff --git a/packages/core/src/generation.ts b/packages/core/src/generation.ts index 728f1a98bf..3d51f187ee 100644 --- a/packages/core/src/generation.ts +++ b/packages/core/src/generation.ts @@ -1,4 +1,5 @@ import { createAnthropic } from "@ai-sdk/anthropic"; +import { bedrock } from '@ai-sdk/amazon-bedrock'; import { createGoogleGenerativeAI } from "@ai-sdk/google"; import { createGroq } from "@ai-sdk/groq"; import { createOpenAI } from "@ai-sdk/openai"; @@ -45,7 +46,6 @@ import { IVerifiableInferenceAdapter, VerifiableInferenceOptions, VerifiableInferenceResult, - VerifiableInferenceProvider, TelemetrySettings, TokenizerType, } from "./types.ts"; @@ -504,7 +504,37 @@ export async function generateText({ break; } - case ModelProviderName.CLAUDE_VERTEX: { + case ModelProviderName.BEDROCK: { + elizaLogger.debug("Initializing Bedrock model."); + try { + const { text: bedrockResponse } = await aiGenerateText({ + model: bedrock(model), + prompt: context, + /* + system: + runtime.character.system ?? + settings.SYSTEM_PROMPT ?? 
+ undefined, + */ + //tools: tools, + //onStepFinish: onStepFinish, + //maxSteps: maxSteps, + temperature: temperature, + maxTokens: max_response_length, + //frequencyPenalty: frequency_penalty, + //presencePenalty: presence_penalty, + // experimental_telemetry: experimental_telemetry, + }); + response = bedrockResponse; + elizaLogger.debug("Received response from bedrock model."); + } catch (error) { + elizaLogger.error("Error in bedrock:", error); + throw error; + } + break; + } + + case ModelProviderName.CLAUDE_VERTEX: { elizaLogger.debug("Initializing Claude Vertex model."); const anthropic = createAnthropic({ diff --git a/packages/core/src/localembeddingManager.ts b/packages/core/src/localembeddingManager.ts index e6f853a934..dd60e012e6 100644 --- a/packages/core/src/localembeddingManager.ts +++ b/packages/core/src/localembeddingManager.ts @@ -1,11 +1,11 @@ import path from "node:path"; import { fileURLToPath } from "url"; -import { FlagEmbedding, EmbeddingModel } from "fastembed"; +//import { FlagEmbedding, EmbeddingModel } from "fastembed"; import elizaLogger from "./logger"; class LocalEmbeddingModelManager { private static instance: LocalEmbeddingModelManager | null; - private model: FlagEmbedding | null = null; + private model: null = null; private initPromise: Promise | null = null; private initializationLock = false; @@ -79,11 +79,11 @@ class LocalEmbeddingModelManager { elizaLogger.debug("Initializing BGE embedding model..."); - this.model = await FlagEmbedding.init({ - cacheDir: cacheDir, - model: EmbeddingModel.BGESmallENV15, - maxLength: 512, - }); + // this.model = await FlagEmbedding.init({ + // cacheDir: cacheDir, + // model: EmbeddingModel.BGESmallENV15, + // maxLength: 512, + // }); elizaLogger.debug("BGE model initialized successfully"); } catch (error) { diff --git a/packages/core/src/models.ts b/packages/core/src/models.ts index 2c54db91dd..6112d2abb9 100644 --- a/packages/core/src/models.ts +++ b/packages/core/src/models.ts @@ -49,6 +49,44 
@@ export const models: Models = { }, }, }, + [ModelProviderName.BEDROCK]: { + model: { + [ModelClass.SMALL]: { + name: settings.SMALL_BEDROCK_MODEL || "amazon.nova-lite-v1:0", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + + }, + [ModelClass.MEDIUM]: { + name: settings.MEDIUM_BEDROCK_MODEL || "amazon.nova-lite-v1:0", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + + }, + [ModelClass.EMBEDDING]: { + name: settings.EMBEDDING_BEDROCK_MODEL || "amazon.titan-embed-text-v1", + }, + + [ModelClass.LARGE]: { + name: settings.LARGE_BEDROCK_MODEL || "amazon.nova-lite-v1:0", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + + } + }, + }, [ModelProviderName.ETERNALAI]: { endpoint: settings.ETERNALAI_URL, model: { diff --git a/packages/core/src/ragknowledge.ts b/packages/core/src/ragknowledge.ts index 57f4e452c7..934725f228 100644 --- a/packages/core/src/ragknowledge.ts +++ b/packages/core/src/ragknowledge.ts @@ -367,7 +367,7 @@ export class RAGKnowledgeManager implements IRAGKnowledgeManager { }; const startTime = Date.now(); - let content = file.content; + const content = file.content; try { const fileSizeKB = new TextEncoder().encode(content).length / 1024; diff --git a/packages/core/src/types.ts b/packages/core/src/types.ts index 5fc1bc8331..804a344f01 100644 --- a/packages/core/src/types.ts +++ b/packages/core/src/types.ts @@ -204,6 +204,7 @@ export type Models = { [ModelProviderName.OPENAI]: Model; [ModelProviderName.ETERNALAI]: Model; [ModelProviderName.ANTHROPIC]: Model; + [ModelProviderName.BEDROCK]: Model; [ModelProviderName.GROK]: Model; [ModelProviderName.GROQ]: Model; [ModelProviderName.LLAMACLOUD]: Model; @@ -235,6 +236,7 @@ export enum ModelProviderName { OPENAI = "openai", ETERNALAI = "eternalai", 
ANTHROPIC = "anthropic", + BEDROCK = "bedrock", GROK = "grok", GROQ = "groq", LLAMACLOUD = "llama_cloud", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 91bb5dd1ae..cb22e84b0b 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -568,6 +568,9 @@ importers: packages/core: dependencies: + '@ai-sdk/amazon-bedrock': + specifier: ^1.0.8 + version: 1.0.8(zod@3.23.8) '@ai-sdk/anthropic': specifier: 0.0.56 version: 0.0.56(zod@3.23.8) @@ -721,7 +724,7 @@ importers: version: 2.79.2 ts-jest: specifier: 29.2.5 - version: 29.2.5(@babel/core@7.26.0)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.26.0))(jest@29.7.0(@types/node@22.8.4)(babel-plugin-macros@3.1.0)(ts-node@10.9.2(@swc/core@1.10.4(@swc/helpers@0.5.15))(@types/node@22.8.4)(typescript@5.6.3)))(typescript@5.6.3) + version: 29.2.5(@babel/core@7.26.0)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.26.0))(esbuild@0.24.2)(jest@29.7.0(@types/node@22.8.4)(babel-plugin-macros@3.1.0)(ts-node@10.9.2(@swc/core@1.10.4(@swc/helpers@0.5.15))(@types/node@22.8.4)(typescript@5.6.3)))(typescript@5.6.3) ts-node: specifier: 10.9.2 version: 10.9.2(@swc/core@1.10.4(@swc/helpers@0.5.15))(@types/node@22.8.4)(typescript@5.6.3) @@ -1117,6 +1120,12 @@ packages: '@adraffy/ens-normalize@1.11.0': resolution: {integrity: sha512-/3DDPKHqqIqxUULp8yP4zODUY1i+2xvVWsv8A79xGWdCAG+8sb0hRh0Rk2QyOJUnnbyPUAZYcpBuRe3nS2OIUg==} + '@ai-sdk/amazon-bedrock@1.0.8': + resolution: {integrity: sha512-xR1xTyzMpzUjKfi+tCPUxs3B5y6UwWg33G63W7AwiXTpo8C7jgf7EFLsdsRX1ivqyXhP0b9U9gSRYTEbiS1sgA==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.0.0 + '@ai-sdk/anthropic@0.0.56': resolution: {integrity: sha512-FC/XbeFANFp8rHH+zEZF34cvRu9T42rQxw9QnUzJ1LXTi1cWjxYOx2Zo4vfg0iofxxqgOe4fT94IdT2ERQ89bA==} engines: {node: '>=18'} @@ -1190,6 +1199,15 @@ packages: zod: optional: true + '@ai-sdk/provider-utils@2.0.7': + resolution: {integrity: 
sha512-4sfPlKEALHPXLmMFcPlYksst3sWBJXmCDZpIBJisRrmwGG6Nn3mq0N1Zu/nZaGcrWZoOY+HT2Wbxla1oTElYHQ==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.0.0 + peerDependenciesMeta: + zod: + optional: true + '@ai-sdk/provider@0.0.24': resolution: {integrity: sha512-XMsNGJdGO+L0cxhhegtqZ8+T6nn4EoShS819OvCgI2kLbYTIvk0GWFGD0AXJmxkxs3DrpsJxKAFukFR7bvTkgQ==} engines: {node: '>=18'} @@ -1436,6 +1454,127 @@ packages: '@asamuzakjp/css-color@2.8.2': resolution: {integrity: sha512-RtWv9jFN2/bLExuZgFFZ0I3pWWeezAHGgrmjqGGWclATl1aDe3yhCUaI0Ilkp6OCk9zX7+FjvDasEX8Q9Rxc5w==} + '@aws-crypto/crc32@5.2.0': + resolution: {integrity: sha512-nLbCWqQNgUiwwtFsen1AdzAtvuLRsQS8rYgMuxCrdKf9kOssamGLuPwyTY9wyYblNr9+1XM8v6zoDTPPSIeANg==} + engines: {node: '>=16.0.0'} + + '@aws-crypto/sha256-browser@5.2.0': + resolution: {integrity: sha512-AXfN/lGotSQwu6HNcEsIASo7kWXZ5HYWvfOmSNKDsEqC4OashTp8alTmaz+F7TC2L083SFv5RdB+qU3Vs1kZqw==} + + '@aws-crypto/sha256-js@5.2.0': + resolution: {integrity: sha512-FFQQyu7edu4ufvIZ+OadFpHHOt+eSTBaYaki44c+akjg7qZg9oOQeLlk77F6tSYqjDAFClrHJk9tMf0HdVyOvA==} + engines: {node: '>=16.0.0'} + + '@aws-crypto/supports-web-crypto@5.2.0': + resolution: {integrity: sha512-iAvUotm021kM33eCdNfwIN//F77/IADDSs58i+MDaOqFrVjZo9bAal0NK7HurRuWLLpF1iLX7gbWrjHjeo+YFg==} + + '@aws-crypto/util@5.2.0': + resolution: {integrity: sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ==} + + '@aws-sdk/client-bedrock-runtime@3.726.0': + resolution: {integrity: sha512-ZiC/OPk3YpMUQrbsIEqZhxgkqSjG0cNiA+GglkK4NkQRll0wqGzGbVhAHx9PmKu2veij26OOPf6hgkRjS5wEfw==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/client-sso-oidc@3.726.0': + resolution: {integrity: sha512-5JzTX9jwev7+y2Jkzjz0pd1wobB5JQfPOQF3N2DrJ5Pao0/k6uRYwE4NqB0p0HlGrMTDm7xNq7OSPPIPG575Jw==} + engines: {node: '>=18.0.0'} + peerDependencies: + '@aws-sdk/client-sts': ^3.726.0 + + '@aws-sdk/client-sso@3.726.0': + resolution: {integrity: 
sha512-NM5pjv2qglEc4XN3nnDqtqGsSGv1k5YTmzDo3W3pObItHmpS8grSeNfX9zSH+aVl0Q8hE4ZIgvTPNZ+GzwVlqg==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/client-sts@3.726.0': + resolution: {integrity: sha512-047EqXv2BAn/43eP92zsozPnR3paFFMsj5gjytx9kGNtp+WV0fUZNztCOobtouAxBY0ZQ8Xx5RFnmjpRb6Kjsg==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/core@3.723.0': + resolution: {integrity: sha512-UraXNmvqj3vScSsTkjMwQkhei30BhXlW5WxX6JacMKVtl95c7z0qOXquTWeTalYkFfulfdirUhvSZrl+hcyqTw==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/credential-provider-env@3.723.0': + resolution: {integrity: sha512-OuH2yULYUHTVDUotBoP/9AEUIJPn81GQ/YBtZLoo2QyezRJ2QiO/1epVtbJlhNZRwXrToLEDmQGA2QfC8c7pbA==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/credential-provider-http@3.723.0': + resolution: {integrity: sha512-DTsKC6xo/kz/ZSs1IcdbQMTgiYbpGTGEd83kngFc1bzmw7AmK92DBZKNZpumf8R/UfSpTcj9zzUUmrWz1kD0eQ==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/credential-provider-ini@3.726.0': + resolution: {integrity: sha512-seTtcKL2+gZX6yK1QRPr5mDJIBOatrpoyrO8D5b8plYtV/PDbDW3mtDJSWFHet29G61ZmlNElyXRqQCXn9WX+A==} + engines: {node: '>=18.0.0'} + peerDependencies: + '@aws-sdk/client-sts': ^3.726.0 + + '@aws-sdk/credential-provider-node@3.726.0': + resolution: {integrity: sha512-jjsewBcw/uLi24x8JbnuDjJad4VA9ROCE94uVRbEnGmUEsds75FWOKp3fWZLQlmjLtzsIbJOZLALkZP86liPaw==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/credential-provider-process@3.723.0': + resolution: {integrity: sha512-fgupvUjz1+jeoCBA7GMv0L6xEk92IN6VdF4YcFhsgRHlHvNgm7ayaoKQg7pz2JAAhG/3jPX6fp0ASNy+xOhmPA==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/credential-provider-sso@3.726.0': + resolution: {integrity: sha512-WxkN76WeB08j2yw7jUH9yCMPxmT9eBFd9ZA/aACG7yzOIlsz7gvG3P2FQ0tVg25GHM0E4PdU3p/ByTOawzcOAg==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/credential-provider-web-identity@3.723.0': + resolution: {integrity: sha512-tl7pojbFbr3qLcOE6xWaNCf1zEfZrIdSJtOPeSXfV/thFMMAvIjgf3YN6Zo1a6cxGee8zrV/C8PgOH33n+Ev/A==} + engines: {node: '>=18.0.0'} + peerDependencies: + 
'@aws-sdk/client-sts': ^3.723.0 + + '@aws-sdk/middleware-host-header@3.723.0': + resolution: {integrity: sha512-LLVzLvk299pd7v4jN9yOSaWDZDfH0SnBPb6q+FDPaOCMGBY8kuwQso7e/ozIKSmZHRMGO3IZrflasHM+rI+2YQ==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/middleware-logger@3.723.0': + resolution: {integrity: sha512-chASQfDG5NJ8s5smydOEnNK7N0gDMyuPbx7dYYcm1t/PKtnVfvWF+DHCTrRC2Ej76gLJVCVizlAJKM8v8Kg3cg==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/middleware-recursion-detection@3.723.0': + resolution: {integrity: sha512-7usZMtoynT9/jxL/rkuDOFQ0C2mhXl4yCm67Rg7GNTstl67u7w5WN1aIRImMeztaKlw8ExjoTyo6WTs1Kceh7A==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/middleware-user-agent@3.726.0': + resolution: {integrity: sha512-hZvzuE5S0JmFie1r68K2wQvJbzyxJFdzltj9skgnnwdvLe8F/tz7MqLkm28uV0m4jeHk0LpiBo6eZaPkQiwsZQ==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/region-config-resolver@3.723.0': + resolution: {integrity: sha512-tGF/Cvch3uQjZIj34LY2mg8M2Dr4kYG8VU8Yd0dFnB1ybOEOveIK/9ypUo9ycZpB9oO6q01KRe5ijBaxNueUQg==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/token-providers@3.723.0': + resolution: {integrity: sha512-hniWi1x4JHVwKElANh9afKIMUhAutHVBRD8zo6usr0PAoj+Waf220+1ULS74GXtLXAPCiNXl5Og+PHA7xT8ElQ==} + engines: {node: '>=18.0.0'} + peerDependencies: + '@aws-sdk/client-sso-oidc': ^3.723.0 + + '@aws-sdk/types@3.723.0': + resolution: {integrity: sha512-LmK3kwiMZG1y5g3LGihT9mNkeNOmwEyPk6HGcJqh0wOSV4QpWoKu2epyKE4MLQNUUlz2kOVbVbOrwmI6ZcteuA==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/util-endpoints@3.726.0': + resolution: {integrity: sha512-sLd30ASsPMoPn3XBK50oe/bkpJ4N8Bpb7SbhoxcY3Lk+fSASaWxbbXE81nbvCnkxrZCvkPOiDHzJCp1E2im71A==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/util-locate-window@3.723.0': + resolution: {integrity: sha512-Yf2CS10BqK688DRsrKI/EO6B8ff5J86NXe4C+VCysK7UOgN0l1zOTeTukZ3H8Q9tYYX3oaF1961o8vRkFm7Nmw==} + engines: {node: '>=18.0.0'} + + '@aws-sdk/util-user-agent-browser@3.723.0': + resolution: {integrity: 
sha512-Wh9I6j2jLhNFq6fmXydIpqD1WyQLyTfSxjW9B+PXSnPyk3jtQW8AKQur7p97rO8LAUzVI0bv8kb3ZzDEVbquIg==} + + '@aws-sdk/util-user-agent-node@3.726.0': + resolution: {integrity: sha512-iEj6KX9o6IQf23oziorveRqyzyclWai95oZHDJtYav3fvLJKStwSjygO4xSF7ycHcTYeCHSLO1FFOHgGVs4Viw==} + engines: {node: '>=18.0.0'} + peerDependencies: + aws-crt: '>=1.0.0' + peerDependenciesMeta: + aws-crt: + optional: true + '@babel/code-frame@7.26.2': resolution: {integrity: sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==} engines: {node: '>=6.9.0'} @@ -4778,6 +4917,194 @@ packages: '@slorber/remark-comment@1.0.0': resolution: {integrity: sha512-RCE24n7jsOj1M0UPvIQCHTe7fI0sFL4S2nwKVWwHyVr/wI/H8GosgsJGyhnsZoGFnD/P2hLf1mSbrrgSLN93NA==} + '@smithy/abort-controller@4.0.1': + resolution: {integrity: sha512-fiUIYgIgRjMWznk6iLJz35K2YxSLHzLBA/RC6lBrKfQ8fHbPfvk7Pk9UvpKoHgJjI18MnbPuEju53zcVy6KF1g==} + engines: {node: '>=18.0.0'} + + '@smithy/config-resolver@4.0.1': + resolution: {integrity: sha512-Igfg8lKu3dRVkTSEm98QpZUvKEOa71jDX4vKRcvJVyRc3UgN3j7vFMf0s7xLQhYmKa8kyJGQgUJDOV5V3neVlQ==} + engines: {node: '>=18.0.0'} + + '@smithy/core@3.1.0': + resolution: {integrity: sha512-swFv0wQiK7TGHeuAp6lfF5Kw1dHWsTrCuc+yh4Kh05gEShjsE2RUxHucEerR9ih9JITNtaHcSpUThn5Y/vDw0A==} + engines: {node: '>=18.0.0'} + + '@smithy/credential-provider-imds@4.0.1': + resolution: {integrity: sha512-l/qdInaDq1Zpznpmev/+52QomsJNZ3JkTl5yrTl02V6NBgJOQ4LY0SFw/8zsMwj3tLe8vqiIuwF6nxaEwgf6mg==} + engines: {node: '>=18.0.0'} + + '@smithy/eventstream-codec@4.0.1': + resolution: {integrity: sha512-Q2bCAAR6zXNVtJgifsU16ZjKGqdw/DyecKNgIgi7dlqw04fqDu0mnq+JmGphqheypVc64CYq3azSuCpAdFk2+A==} + engines: {node: '>=18.0.0'} + + '@smithy/eventstream-serde-browser@4.0.1': + resolution: {integrity: sha512-HbIybmz5rhNg+zxKiyVAnvdM3vkzjE6ccrJ620iPL8IXcJEntd3hnBl+ktMwIy12Te/kyrSbUb8UCdnUT4QEdA==} + engines: {node: '>=18.0.0'} + + '@smithy/eventstream-serde-config-resolver@4.0.1': + resolution: {integrity: 
sha512-lSipaiq3rmHguHa3QFF4YcCM3VJOrY9oq2sow3qlhFY+nBSTF/nrO82MUQRPrxHQXA58J5G1UnU2WuJfi465BA==} + engines: {node: '>=18.0.0'} + + '@smithy/eventstream-serde-node@4.0.1': + resolution: {integrity: sha512-o4CoOI6oYGYJ4zXo34U8X9szDe3oGjmHgsMGiZM0j4vtNoT+h80TLnkUcrLZR3+E6HIxqW+G+9WHAVfl0GXK0Q==} + engines: {node: '>=18.0.0'} + + '@smithy/eventstream-serde-universal@4.0.1': + resolution: {integrity: sha512-Z94uZp0tGJuxds3iEAZBqGU2QiaBHP4YytLUjwZWx+oUeohCsLyUm33yp4MMBmhkuPqSbQCXq5hDet6JGUgHWA==} + engines: {node: '>=18.0.0'} + + '@smithy/fetch-http-handler@5.0.1': + resolution: {integrity: sha512-3aS+fP28urrMW2KTjb6z9iFow6jO8n3MFfineGbndvzGZit3taZhKWtTorf+Gp5RpFDDafeHlhfsGlDCXvUnJA==} + engines: {node: '>=18.0.0'} + + '@smithy/hash-node@4.0.1': + resolution: {integrity: sha512-TJ6oZS+3r2Xu4emVse1YPB3Dq3d8RkZDKcPr71Nj/lJsdAP1c7oFzYqEn1IBc915TsgLl2xIJNuxCz+gLbLE0w==} + engines: {node: '>=18.0.0'} + + '@smithy/invalid-dependency@4.0.1': + resolution: {integrity: sha512-gdudFPf4QRQ5pzj7HEnu6FhKRi61BfH/Gk5Yf6O0KiSbr1LlVhgjThcvjdu658VE6Nve8vaIWB8/fodmS1rBPQ==} + engines: {node: '>=18.0.0'} + + '@smithy/is-array-buffer@2.2.0': + resolution: {integrity: sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA==} + engines: {node: '>=14.0.0'} + + '@smithy/is-array-buffer@4.0.0': + resolution: {integrity: sha512-saYhF8ZZNoJDTvJBEWgeBccCg+yvp1CX+ed12yORU3NilJScfc6gfch2oVb4QgxZrGUx3/ZJlb+c/dJbyupxlw==} + engines: {node: '>=18.0.0'} + + '@smithy/middleware-content-length@4.0.1': + resolution: {integrity: sha512-OGXo7w5EkB5pPiac7KNzVtfCW2vKBTZNuCctn++TTSOMpe6RZO/n6WEC1AxJINn3+vWLKW49uad3lo/u0WJ9oQ==} + engines: {node: '>=18.0.0'} + + '@smithy/middleware-endpoint@4.0.1': + resolution: {integrity: sha512-hCCOPu9+sRI7Wj0rZKKnGylKXBEd9cQJetzjQqe8cT4PWvtQAbvNVa6cgAONiZg9m8LaXtP9/waxm3C3eO4hiw==} + engines: {node: '>=18.0.0'} + + '@smithy/middleware-retry@4.0.1': + resolution: {integrity: 
sha512-n3g2zZFgOWaz2ZYCy8+4wxSmq+HSTD8QKkRhFDv+nkxY1o7gzyp4PDz/+tOdcNPMPZ/A6Mt4aVECYNjQNiaHJw==} + engines: {node: '>=18.0.0'} + + '@smithy/middleware-serde@4.0.1': + resolution: {integrity: sha512-Fh0E2SOF+S+P1+CsgKyiBInAt3o2b6Qk7YOp2W0Qx2XnfTdfMuSDKUEcnrtpxCzgKJnqXeLUZYqtThaP0VGqtA==} + engines: {node: '>=18.0.0'} + + '@smithy/middleware-stack@4.0.1': + resolution: {integrity: sha512-dHwDmrtR/ln8UTHpaIavRSzeIk5+YZTBtLnKwDW3G2t6nAupCiQUvNzNoHBpik63fwUaJPtlnMzXbQrNFWssIA==} + engines: {node: '>=18.0.0'} + + '@smithy/node-config-provider@4.0.1': + resolution: {integrity: sha512-8mRTjvCtVET8+rxvmzRNRR0hH2JjV0DFOmwXPrISmTIJEfnCBugpYYGAsCj8t41qd+RB5gbheSQ/6aKZCQvFLQ==} + engines: {node: '>=18.0.0'} + + '@smithy/node-http-handler@4.0.1': + resolution: {integrity: sha512-ddQc7tvXiVLC5c3QKraGWde761KSk+mboCheZoWtuqnXh5l0WKyFy3NfDIM/dsKrI9HlLVH/21pi9wWK2gUFFA==} + engines: {node: '>=18.0.0'} + + '@smithy/property-provider@4.0.1': + resolution: {integrity: sha512-o+VRiwC2cgmk/WFV0jaETGOtX16VNPp2bSQEzu0whbReqE1BMqsP2ami2Vi3cbGVdKu1kq9gQkDAGKbt0WOHAQ==} + engines: {node: '>=18.0.0'} + + '@smithy/protocol-http@5.0.1': + resolution: {integrity: sha512-TE4cpj49jJNB/oHyh/cRVEgNZaoPaxd4vteJNB0yGidOCVR0jCw/hjPVsT8Q8FRmj8Bd3bFZt8Dh7xGCT+xMBQ==} + engines: {node: '>=18.0.0'} + + '@smithy/querystring-builder@4.0.1': + resolution: {integrity: sha512-wU87iWZoCbcqrwszsOewEIuq+SU2mSoBE2CcsLwE0I19m0B2gOJr1MVjxWcDQYOzHbR1xCk7AcOBbGFUYOKvdg==} + engines: {node: '>=18.0.0'} + + '@smithy/querystring-parser@4.0.1': + resolution: {integrity: sha512-Ma2XC7VS9aV77+clSFylVUnPZRindhB7BbmYiNOdr+CHt/kZNJoPP0cd3QxCnCFyPXC4eybmyE98phEHkqZ5Jw==} + engines: {node: '>=18.0.0'} + + '@smithy/service-error-classification@4.0.1': + resolution: {integrity: sha512-3JNjBfOWpj/mYfjXJHB4Txc/7E4LVq32bwzE7m28GN79+M1f76XHflUaSUkhOriprPDzev9cX/M+dEB80DNDKA==} + engines: {node: '>=18.0.0'} + + '@smithy/shared-ini-file-loader@4.0.1': + resolution: {integrity: 
sha512-hC8F6qTBbuHRI/uqDgqqi6J0R4GtEZcgrZPhFQnMhfJs3MnUTGSnR1NSJCJs5VWlMydu0kJz15M640fJlRsIOw==} + engines: {node: '>=18.0.0'} + + '@smithy/signature-v4@5.0.1': + resolution: {integrity: sha512-nCe6fQ+ppm1bQuw5iKoeJ0MJfz2os7Ic3GBjOkLOPtavbD1ONoyE3ygjBfz2ythFWm4YnRm6OxW+8p/m9uCoIA==} + engines: {node: '>=18.0.0'} + + '@smithy/smithy-client@4.1.0': + resolution: {integrity: sha512-NiboZnrsrZY+Cy5hQNbYi+nVNssXVi2I+yL4CIKNIanOhH8kpC5PKQ2jx/MQpwVr21a3XcVoQBArlpRF36OeEQ==} + engines: {node: '>=18.0.0'} + + '@smithy/types@4.1.0': + resolution: {integrity: sha512-enhjdwp4D7CXmwLtD6zbcDMbo6/T6WtuuKCY49Xxc6OMOmUWlBEBDREsxxgV2LIdeQPW756+f97GzcgAwp3iLw==} + engines: {node: '>=18.0.0'} + + '@smithy/url-parser@4.0.1': + resolution: {integrity: sha512-gPXcIEUtw7VlK8f/QcruNXm7q+T5hhvGu9tl63LsJPZ27exB6dtNwvh2HIi0v7JcXJ5emBxB+CJxwaLEdJfA+g==} + engines: {node: '>=18.0.0'} + + '@smithy/util-base64@4.0.0': + resolution: {integrity: sha512-CvHfCmO2mchox9kjrtzoHkWHxjHZzaFojLc8quxXY7WAAMAg43nuxwv95tATVgQFNDwd4M9S1qFzj40Ul41Kmg==} + engines: {node: '>=18.0.0'} + + '@smithy/util-body-length-browser@4.0.0': + resolution: {integrity: sha512-sNi3DL0/k64/LO3A256M+m3CDdG6V7WKWHdAiBBMUN8S3hK3aMPhwnPik2A/a2ONN+9doY9UxaLfgqsIRg69QA==} + engines: {node: '>=18.0.0'} + + '@smithy/util-body-length-node@4.0.0': + resolution: {integrity: sha512-q0iDP3VsZzqJyje8xJWEJCNIu3lktUGVoSy1KB0UWym2CL1siV3artm+u1DFYTLejpsrdGyCSWBdGNjJzfDPjg==} + engines: {node: '>=18.0.0'} + + '@smithy/util-buffer-from@2.2.0': + resolution: {integrity: sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA==} + engines: {node: '>=14.0.0'} + + '@smithy/util-buffer-from@4.0.0': + resolution: {integrity: sha512-9TOQ7781sZvddgO8nxueKi3+yGvkY35kotA0Y6BWRajAv8jjmigQ1sBwz0UX47pQMYXJPahSKEKYFgt+rXdcug==} + engines: {node: '>=18.0.0'} + + '@smithy/util-config-provider@4.0.0': + resolution: {integrity: sha512-L1RBVzLyfE8OXH+1hsJ8p+acNUSirQnWQ6/EgpchV88G6zGBTDPdXiiExei6Z1wR2RxYvxY/XLw6AMNCCt8H3w==} + 
engines: {node: '>=18.0.0'} + + '@smithy/util-defaults-mode-browser@4.0.1': + resolution: {integrity: sha512-nkQifWzWUHw/D0aLPgyKut+QnJ5X+5E8wBvGfvrYLLZ86xPfVO6MoqfQo/9s4bF3Xscefua1M6KLZtobHMWrBg==} + engines: {node: '>=18.0.0'} + + '@smithy/util-defaults-mode-node@4.0.1': + resolution: {integrity: sha512-LeAx2faB83litC9vaOdwFaldtto2gczUHxfFf8yoRwDU3cwL4/pDm7i0hxsuBCRk5mzHsrVGw+3EVCj32UZMdw==} + engines: {node: '>=18.0.0'} + + '@smithy/util-endpoints@3.0.1': + resolution: {integrity: sha512-zVdUENQpdtn9jbpD9SCFK4+aSiavRb9BxEtw9ZGUR1TYo6bBHbIoi7VkrFQ0/RwZlzx0wRBaRmPclj8iAoJCLA==} + engines: {node: '>=18.0.0'} + + '@smithy/util-hex-encoding@4.0.0': + resolution: {integrity: sha512-Yk5mLhHtfIgW2W2WQZWSg5kuMZCVbvhFmC7rV4IO2QqnZdbEFPmQnCcGMAX2z/8Qj3B9hYYNjZOhWym+RwhePw==} + engines: {node: '>=18.0.0'} + + '@smithy/util-middleware@4.0.1': + resolution: {integrity: sha512-HiLAvlcqhbzhuiOa0Lyct5IIlyIz0PQO5dnMlmQ/ubYM46dPInB+3yQGkfxsk6Q24Y0n3/JmcA1v5iEhmOF5mA==} + engines: {node: '>=18.0.0'} + + '@smithy/util-retry@4.0.1': + resolution: {integrity: sha512-WmRHqNVwn3kI3rKk1LsKcVgPBG6iLTBGC1iYOV3GQegwJ3E8yjzHytPt26VNzOWr1qu0xE03nK0Ug8S7T7oufw==} + engines: {node: '>=18.0.0'} + + '@smithy/util-stream@4.0.1': + resolution: {integrity: sha512-Js16gOgU6Qht6qTPfuJgb+1YD4AEO+5Y1UPGWKSp3BNo8ONl/qhXSYDhFKJtwybRJynlCqvP5IeiaBsUmkSPTQ==} + engines: {node: '>=18.0.0'} + + '@smithy/util-uri-escape@4.0.0': + resolution: {integrity: sha512-77yfbCbQMtgtTylO9itEAdpPXSog3ZxMe09AEhm0dU0NLTalV70ghDZFR+Nfi1C60jnJoh/Re4090/DuZh2Omg==} + engines: {node: '>=18.0.0'} + + '@smithy/util-utf8@2.3.0': + resolution: {integrity: sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A==} + engines: {node: '>=14.0.0'} + + '@smithy/util-utf8@4.0.0': + resolution: {integrity: sha512-b+zebfKCfRdgNJDknHCob3O7FpeYQN6ZG6YLExMcasDHsCXlsXCEuiPZeLnJLpwa5dvPetGlnGCiMHuLwGvFow==} + engines: {node: '>=18.0.0'} + '@solana/buffer-layout-utils@0.2.0': resolution: {integrity: 
sha512-szG4sxgJGktbuZYDg2FfNmkMi0DYQoVjN2h7ta1W1hPrwzarcFLBq9UpX1UjNXsNpT9dn+chgprtWGioUAr4/g==} engines: {node: '>= 10'} @@ -5520,6 +5847,9 @@ packages: '@types/uuid@8.3.4': resolution: {integrity: sha512-c/I8ZRb51j+pYGAu5CrFMRxqZ2ke4y2grEBO5AUjgSkSk+qT2Ea+OdWElz/OiMf5MNpn2b17kuVBwZLQJXzihw==} + '@types/uuid@9.0.8': + resolution: {integrity: sha512-jg+97EGIcY9AGHJJRaaPVgetKDsrTgbRjQ5Msgjh/DQKEFl0DtyRr/VCOyD1T2R1MNeWPK/u7JoGhlDZnKBAfA==} + '@types/wav-encoder@1.3.3': resolution: {integrity: sha512-2haw8zEMg4DspJRXmxUn2TElrQUs0bLPDh6x4N7/hDn+3tx2G05Lc+kC55uoHYsv8q+4deWhnDtHZT/ximg9aw==} @@ -6491,6 +6821,9 @@ packages: borsh@2.0.0: resolution: {integrity: sha512-kc9+BgR3zz9+cjbwM8ODoUB4fs3X3I5A/HtX7LZKxCLaMrEeDFoBpnhZY//DTS1VZBSs6S5v46RZRbZjRFspEg==} + bowser@2.11.0: + resolution: {integrity: sha512-AlcaJBi/pqqJBIQ8U9Mcpc9i8Aqxn88Skv5d+xBX006BY5u8N3mGLHa5Lgppa7L/HfwgwLgZ6NYs+Ag6uUmJRA==} + boxen@6.2.1: resolution: {integrity: sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} @@ -8327,6 +8660,10 @@ packages: fast-uri@3.0.5: resolution: {integrity: sha512-5JnBCWpFlMo0a3ciDy/JckMzzv1U9coZrIhedq+HXxxUfDTAiS0LA8OKVao4G9BxmCVck/jtA5r3KAtRWEyD8Q==} + fast-xml-parser@4.4.1: + resolution: {integrity: sha512-xkjOecfnKGkSsOwtZ5Pz7Us/T6mrbPQrq0nh+aCO5V9nk5NLWmasAHumTKjiPJPWANe+kAZ84Jc8ooJkzZ88Sw==} + hasBin: true + fastembed@1.14.1: resolution: {integrity: sha512-Y14v+FWZwjNUpQ7mRGYu4N5yF+hZkF7zqzPWzzLbwdIEtYsHy0DSpiVJ+Fg6Oi1fQjrBKASQt0hdSMSjw1/Wtw==} @@ -13241,6 +13578,9 @@ packages: strip-literal@1.3.0: resolution: {integrity: sha512-PugKzOsyXpArk0yWmUwqOZecSO0GH0bPoctLcqNDH9J04pVW3lflYE0ujElBGTloevcxF5MofAOZ7C5l2b+wLg==} + strnum@1.0.5: + resolution: {integrity: sha512-J8bbNyKKXl5qYcR36TIO8W3mVGVHrmmxsd5PAItGkmyzwJvybiw2IVq5nqd0i4LSNSkB/sx9VHllbfFdr9k1JA==} + strong-log-transformer@2.1.0: resolution: {integrity: 
sha512-B3Hgul+z0L9a236FAUC9iZsL+nVHgoCJnqCbN588DjYxvGXaXaaFbfmQ/JhvKjZwsOukuR72XbHv71Qkug0HxA==} engines: {node: '>=4'} @@ -14806,6 +15146,15 @@ snapshots: '@adraffy/ens-normalize@1.11.0': {} + '@ai-sdk/amazon-bedrock@1.0.8(zod@3.23.8)': + dependencies: + '@ai-sdk/provider': 1.0.4 + '@ai-sdk/provider-utils': 2.0.7(zod@3.23.8) + '@aws-sdk/client-bedrock-runtime': 3.726.0 + zod: 3.23.8 + transitivePeerDependencies: + - aws-crt + '@ai-sdk/anthropic@0.0.56(zod@3.23.8)': dependencies: '@ai-sdk/provider': 0.0.26 @@ -14888,6 +15237,15 @@ snapshots: optionalDependencies: zod: 3.24.1 + '@ai-sdk/provider-utils@2.0.7(zod@3.23.8)': + dependencies: + '@ai-sdk/provider': 1.0.4 + eventsource-parser: 3.0.0 + nanoid: 3.3.8 + secure-json-parse: 2.7.0 + optionalDependencies: + zod: 3.23.8 + '@ai-sdk/provider@0.0.24': dependencies: json-schema: 0.4.0 @@ -15220,6 +15578,407 @@ snapshots: lru-cache: 11.0.2 optional: true + '@aws-crypto/crc32@5.2.0': + dependencies: + '@aws-crypto/util': 5.2.0 + '@aws-sdk/types': 3.723.0 + tslib: 2.8.1 + + '@aws-crypto/sha256-browser@5.2.0': + dependencies: + '@aws-crypto/sha256-js': 5.2.0 + '@aws-crypto/supports-web-crypto': 5.2.0 + '@aws-crypto/util': 5.2.0 + '@aws-sdk/types': 3.723.0 + '@aws-sdk/util-locate-window': 3.723.0 + '@smithy/util-utf8': 2.3.0 + tslib: 2.8.1 + + '@aws-crypto/sha256-js@5.2.0': + dependencies: + '@aws-crypto/util': 5.2.0 + '@aws-sdk/types': 3.723.0 + tslib: 2.8.1 + + '@aws-crypto/supports-web-crypto@5.2.0': + dependencies: + tslib: 2.8.1 + + '@aws-crypto/util@5.2.0': + dependencies: + '@aws-sdk/types': 3.723.0 + '@smithy/util-utf8': 2.3.0 + tslib: 2.8.1 + + '@aws-sdk/client-bedrock-runtime@3.726.0': + dependencies: + '@aws-crypto/sha256-browser': 5.2.0 + '@aws-crypto/sha256-js': 5.2.0 + '@aws-sdk/client-sso-oidc': 3.726.0(@aws-sdk/client-sts@3.726.0) + '@aws-sdk/client-sts': 3.726.0 + '@aws-sdk/core': 3.723.0 + '@aws-sdk/credential-provider-node': 
3.726.0(@aws-sdk/client-sso-oidc@3.726.0(@aws-sdk/client-sts@3.726.0))(@aws-sdk/client-sts@3.726.0) + '@aws-sdk/middleware-host-header': 3.723.0 + '@aws-sdk/middleware-logger': 3.723.0 + '@aws-sdk/middleware-recursion-detection': 3.723.0 + '@aws-sdk/middleware-user-agent': 3.726.0 + '@aws-sdk/region-config-resolver': 3.723.0 + '@aws-sdk/types': 3.723.0 + '@aws-sdk/util-endpoints': 3.726.0 + '@aws-sdk/util-user-agent-browser': 3.723.0 + '@aws-sdk/util-user-agent-node': 3.726.0 + '@smithy/config-resolver': 4.0.1 + '@smithy/core': 3.1.0 + '@smithy/eventstream-serde-browser': 4.0.1 + '@smithy/eventstream-serde-config-resolver': 4.0.1 + '@smithy/eventstream-serde-node': 4.0.1 + '@smithy/fetch-http-handler': 5.0.1 + '@smithy/hash-node': 4.0.1 + '@smithy/invalid-dependency': 4.0.1 + '@smithy/middleware-content-length': 4.0.1 + '@smithy/middleware-endpoint': 4.0.1 + '@smithy/middleware-retry': 4.0.1 + '@smithy/middleware-serde': 4.0.1 + '@smithy/middleware-stack': 4.0.1 + '@smithy/node-config-provider': 4.0.1 + '@smithy/node-http-handler': 4.0.1 + '@smithy/protocol-http': 5.0.1 + '@smithy/smithy-client': 4.1.0 + '@smithy/types': 4.1.0 + '@smithy/url-parser': 4.0.1 + '@smithy/util-base64': 4.0.0 + '@smithy/util-body-length-browser': 4.0.0 + '@smithy/util-body-length-node': 4.0.0 + '@smithy/util-defaults-mode-browser': 4.0.1 + '@smithy/util-defaults-mode-node': 4.0.1 + '@smithy/util-endpoints': 3.0.1 + '@smithy/util-middleware': 4.0.1 + '@smithy/util-retry': 4.0.1 + '@smithy/util-stream': 4.0.1 + '@smithy/util-utf8': 4.0.0 + '@types/uuid': 9.0.8 + tslib: 2.8.1 + uuid: 9.0.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/client-sso-oidc@3.726.0(@aws-sdk/client-sts@3.726.0)': + dependencies: + '@aws-crypto/sha256-browser': 5.2.0 + '@aws-crypto/sha256-js': 5.2.0 + '@aws-sdk/client-sts': 3.726.0 + '@aws-sdk/core': 3.723.0 + '@aws-sdk/credential-provider-node': 3.726.0(@aws-sdk/client-sso-oidc@3.726.0(@aws-sdk/client-sts@3.726.0))(@aws-sdk/client-sts@3.726.0) + 
'@aws-sdk/middleware-host-header': 3.723.0 + '@aws-sdk/middleware-logger': 3.723.0 + '@aws-sdk/middleware-recursion-detection': 3.723.0 + '@aws-sdk/middleware-user-agent': 3.726.0 + '@aws-sdk/region-config-resolver': 3.723.0 + '@aws-sdk/types': 3.723.0 + '@aws-sdk/util-endpoints': 3.726.0 + '@aws-sdk/util-user-agent-browser': 3.723.0 + '@aws-sdk/util-user-agent-node': 3.726.0 + '@smithy/config-resolver': 4.0.1 + '@smithy/core': 3.1.0 + '@smithy/fetch-http-handler': 5.0.1 + '@smithy/hash-node': 4.0.1 + '@smithy/invalid-dependency': 4.0.1 + '@smithy/middleware-content-length': 4.0.1 + '@smithy/middleware-endpoint': 4.0.1 + '@smithy/middleware-retry': 4.0.1 + '@smithy/middleware-serde': 4.0.1 + '@smithy/middleware-stack': 4.0.1 + '@smithy/node-config-provider': 4.0.1 + '@smithy/node-http-handler': 4.0.1 + '@smithy/protocol-http': 5.0.1 + '@smithy/smithy-client': 4.1.0 + '@smithy/types': 4.1.0 + '@smithy/url-parser': 4.0.1 + '@smithy/util-base64': 4.0.0 + '@smithy/util-body-length-browser': 4.0.0 + '@smithy/util-body-length-node': 4.0.0 + '@smithy/util-defaults-mode-browser': 4.0.1 + '@smithy/util-defaults-mode-node': 4.0.1 + '@smithy/util-endpoints': 3.0.1 + '@smithy/util-middleware': 4.0.1 + '@smithy/util-retry': 4.0.1 + '@smithy/util-utf8': 4.0.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/client-sso@3.726.0': + dependencies: + '@aws-crypto/sha256-browser': 5.2.0 + '@aws-crypto/sha256-js': 5.2.0 + '@aws-sdk/core': 3.723.0 + '@aws-sdk/middleware-host-header': 3.723.0 + '@aws-sdk/middleware-logger': 3.723.0 + '@aws-sdk/middleware-recursion-detection': 3.723.0 + '@aws-sdk/middleware-user-agent': 3.726.0 + '@aws-sdk/region-config-resolver': 3.723.0 + '@aws-sdk/types': 3.723.0 + '@aws-sdk/util-endpoints': 3.726.0 + '@aws-sdk/util-user-agent-browser': 3.723.0 + '@aws-sdk/util-user-agent-node': 3.726.0 + '@smithy/config-resolver': 4.0.1 + '@smithy/core': 3.1.0 + '@smithy/fetch-http-handler': 5.0.1 + '@smithy/hash-node': 4.0.1 + 
'@smithy/invalid-dependency': 4.0.1 + '@smithy/middleware-content-length': 4.0.1 + '@smithy/middleware-endpoint': 4.0.1 + '@smithy/middleware-retry': 4.0.1 + '@smithy/middleware-serde': 4.0.1 + '@smithy/middleware-stack': 4.0.1 + '@smithy/node-config-provider': 4.0.1 + '@smithy/node-http-handler': 4.0.1 + '@smithy/protocol-http': 5.0.1 + '@smithy/smithy-client': 4.1.0 + '@smithy/types': 4.1.0 + '@smithy/url-parser': 4.0.1 + '@smithy/util-base64': 4.0.0 + '@smithy/util-body-length-browser': 4.0.0 + '@smithy/util-body-length-node': 4.0.0 + '@smithy/util-defaults-mode-browser': 4.0.1 + '@smithy/util-defaults-mode-node': 4.0.1 + '@smithy/util-endpoints': 3.0.1 + '@smithy/util-middleware': 4.0.1 + '@smithy/util-retry': 4.0.1 + '@smithy/util-utf8': 4.0.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/client-sts@3.726.0': + dependencies: + '@aws-crypto/sha256-browser': 5.2.0 + '@aws-crypto/sha256-js': 5.2.0 + '@aws-sdk/client-sso-oidc': 3.726.0(@aws-sdk/client-sts@3.726.0) + '@aws-sdk/core': 3.723.0 + '@aws-sdk/credential-provider-node': 3.726.0(@aws-sdk/client-sso-oidc@3.726.0(@aws-sdk/client-sts@3.726.0))(@aws-sdk/client-sts@3.726.0) + '@aws-sdk/middleware-host-header': 3.723.0 + '@aws-sdk/middleware-logger': 3.723.0 + '@aws-sdk/middleware-recursion-detection': 3.723.0 + '@aws-sdk/middleware-user-agent': 3.726.0 + '@aws-sdk/region-config-resolver': 3.723.0 + '@aws-sdk/types': 3.723.0 + '@aws-sdk/util-endpoints': 3.726.0 + '@aws-sdk/util-user-agent-browser': 3.723.0 + '@aws-sdk/util-user-agent-node': 3.726.0 + '@smithy/config-resolver': 4.0.1 + '@smithy/core': 3.1.0 + '@smithy/fetch-http-handler': 5.0.1 + '@smithy/hash-node': 4.0.1 + '@smithy/invalid-dependency': 4.0.1 + '@smithy/middleware-content-length': 4.0.1 + '@smithy/middleware-endpoint': 4.0.1 + '@smithy/middleware-retry': 4.0.1 + '@smithy/middleware-serde': 4.0.1 + '@smithy/middleware-stack': 4.0.1 + '@smithy/node-config-provider': 4.0.1 + '@smithy/node-http-handler': 4.0.1 + 
'@smithy/protocol-http': 5.0.1 + '@smithy/smithy-client': 4.1.0 + '@smithy/types': 4.1.0 + '@smithy/url-parser': 4.0.1 + '@smithy/util-base64': 4.0.0 + '@smithy/util-body-length-browser': 4.0.0 + '@smithy/util-body-length-node': 4.0.0 + '@smithy/util-defaults-mode-browser': 4.0.1 + '@smithy/util-defaults-mode-node': 4.0.1 + '@smithy/util-endpoints': 3.0.1 + '@smithy/util-middleware': 4.0.1 + '@smithy/util-retry': 4.0.1 + '@smithy/util-utf8': 4.0.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/core@3.723.0': + dependencies: + '@aws-sdk/types': 3.723.0 + '@smithy/core': 3.1.0 + '@smithy/node-config-provider': 4.0.1 + '@smithy/property-provider': 4.0.1 + '@smithy/protocol-http': 5.0.1 + '@smithy/signature-v4': 5.0.1 + '@smithy/smithy-client': 4.1.0 + '@smithy/types': 4.1.0 + '@smithy/util-middleware': 4.0.1 + fast-xml-parser: 4.4.1 + tslib: 2.8.1 + + '@aws-sdk/credential-provider-env@3.723.0': + dependencies: + '@aws-sdk/core': 3.723.0 + '@aws-sdk/types': 3.723.0 + '@smithy/property-provider': 4.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@aws-sdk/credential-provider-http@3.723.0': + dependencies: + '@aws-sdk/core': 3.723.0 + '@aws-sdk/types': 3.723.0 + '@smithy/fetch-http-handler': 5.0.1 + '@smithy/node-http-handler': 4.0.1 + '@smithy/property-provider': 4.0.1 + '@smithy/protocol-http': 5.0.1 + '@smithy/smithy-client': 4.1.0 + '@smithy/types': 4.1.0 + '@smithy/util-stream': 4.0.1 + tslib: 2.8.1 + + '@aws-sdk/credential-provider-ini@3.726.0(@aws-sdk/client-sso-oidc@3.726.0(@aws-sdk/client-sts@3.726.0))(@aws-sdk/client-sts@3.726.0)': + dependencies: + '@aws-sdk/client-sts': 3.726.0 + '@aws-sdk/core': 3.723.0 + '@aws-sdk/credential-provider-env': 3.723.0 + '@aws-sdk/credential-provider-http': 3.723.0 + '@aws-sdk/credential-provider-process': 3.723.0 + '@aws-sdk/credential-provider-sso': 3.726.0(@aws-sdk/client-sso-oidc@3.726.0(@aws-sdk/client-sts@3.726.0)) + '@aws-sdk/credential-provider-web-identity': 3.723.0(@aws-sdk/client-sts@3.726.0) + 
'@aws-sdk/types': 3.723.0 + '@smithy/credential-provider-imds': 4.0.1 + '@smithy/property-provider': 4.0.1 + '@smithy/shared-ini-file-loader': 4.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + transitivePeerDependencies: + - '@aws-sdk/client-sso-oidc' + - aws-crt + + '@aws-sdk/credential-provider-node@3.726.0(@aws-sdk/client-sso-oidc@3.726.0(@aws-sdk/client-sts@3.726.0))(@aws-sdk/client-sts@3.726.0)': + dependencies: + '@aws-sdk/credential-provider-env': 3.723.0 + '@aws-sdk/credential-provider-http': 3.723.0 + '@aws-sdk/credential-provider-ini': 3.726.0(@aws-sdk/client-sso-oidc@3.726.0(@aws-sdk/client-sts@3.726.0))(@aws-sdk/client-sts@3.726.0) + '@aws-sdk/credential-provider-process': 3.723.0 + '@aws-sdk/credential-provider-sso': 3.726.0(@aws-sdk/client-sso-oidc@3.726.0(@aws-sdk/client-sts@3.726.0)) + '@aws-sdk/credential-provider-web-identity': 3.723.0(@aws-sdk/client-sts@3.726.0) + '@aws-sdk/types': 3.723.0 + '@smithy/credential-provider-imds': 4.0.1 + '@smithy/property-provider': 4.0.1 + '@smithy/shared-ini-file-loader': 4.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + transitivePeerDependencies: + - '@aws-sdk/client-sso-oidc' + - '@aws-sdk/client-sts' + - aws-crt + + '@aws-sdk/credential-provider-process@3.723.0': + dependencies: + '@aws-sdk/core': 3.723.0 + '@aws-sdk/types': 3.723.0 + '@smithy/property-provider': 4.0.1 + '@smithy/shared-ini-file-loader': 4.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@aws-sdk/credential-provider-sso@3.726.0(@aws-sdk/client-sso-oidc@3.726.0(@aws-sdk/client-sts@3.726.0))': + dependencies: + '@aws-sdk/client-sso': 3.726.0 + '@aws-sdk/core': 3.723.0 + '@aws-sdk/token-providers': 3.723.0(@aws-sdk/client-sso-oidc@3.726.0(@aws-sdk/client-sts@3.726.0)) + '@aws-sdk/types': 3.723.0 + '@smithy/property-provider': 4.0.1 + '@smithy/shared-ini-file-loader': 4.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + transitivePeerDependencies: + - '@aws-sdk/client-sso-oidc' + - aws-crt + + 
'@aws-sdk/credential-provider-web-identity@3.723.0(@aws-sdk/client-sts@3.726.0)': + dependencies: + '@aws-sdk/client-sts': 3.726.0 + '@aws-sdk/core': 3.723.0 + '@aws-sdk/types': 3.723.0 + '@smithy/property-provider': 4.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@aws-sdk/middleware-host-header@3.723.0': + dependencies: + '@aws-sdk/types': 3.723.0 + '@smithy/protocol-http': 5.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@aws-sdk/middleware-logger@3.723.0': + dependencies: + '@aws-sdk/types': 3.723.0 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@aws-sdk/middleware-recursion-detection@3.723.0': + dependencies: + '@aws-sdk/types': 3.723.0 + '@smithy/protocol-http': 5.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@aws-sdk/middleware-user-agent@3.726.0': + dependencies: + '@aws-sdk/core': 3.723.0 + '@aws-sdk/types': 3.723.0 + '@aws-sdk/util-endpoints': 3.726.0 + '@smithy/core': 3.1.0 + '@smithy/protocol-http': 5.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@aws-sdk/region-config-resolver@3.723.0': + dependencies: + '@aws-sdk/types': 3.723.0 + '@smithy/node-config-provider': 4.0.1 + '@smithy/types': 4.1.0 + '@smithy/util-config-provider': 4.0.0 + '@smithy/util-middleware': 4.0.1 + tslib: 2.8.1 + + '@aws-sdk/token-providers@3.723.0(@aws-sdk/client-sso-oidc@3.726.0(@aws-sdk/client-sts@3.726.0))': + dependencies: + '@aws-sdk/client-sso-oidc': 3.726.0(@aws-sdk/client-sts@3.726.0) + '@aws-sdk/types': 3.723.0 + '@smithy/property-provider': 4.0.1 + '@smithy/shared-ini-file-loader': 4.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@aws-sdk/types@3.723.0': + dependencies: + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@aws-sdk/util-endpoints@3.726.0': + dependencies: + '@aws-sdk/types': 3.723.0 + '@smithy/types': 4.1.0 + '@smithy/util-endpoints': 3.0.1 + tslib: 2.8.1 + + '@aws-sdk/util-locate-window@3.723.0': + dependencies: + tslib: 2.8.1 + + '@aws-sdk/util-user-agent-browser@3.723.0': + dependencies: + '@aws-sdk/types': 3.723.0 + '@smithy/types': 4.1.0 + bowser: 
2.11.0 + tslib: 2.8.1 + + '@aws-sdk/util-user-agent-node@3.726.0': + dependencies: + '@aws-sdk/middleware-user-agent': 3.726.0 + '@aws-sdk/types': 3.723.0 + '@smithy/node-config-provider': 4.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + '@babel/code-frame@7.26.2': dependencies: '@babel/helper-validator-identifier': 7.25.9 @@ -18022,41 +18781,6 @@ snapshots: jest-util: 29.7.0 slash: 3.0.0 - '@jest/core@29.7.0(babel-plugin-macros@3.1.0)': - dependencies: - '@jest/console': 29.7.0 - '@jest/reporters': 29.7.0 - '@jest/test-result': 29.7.0 - '@jest/transform': 29.7.0 - '@jest/types': 29.6.3 - '@types/node': 22.10.5 - ansi-escapes: 4.3.2 - chalk: 4.1.2 - ci-info: 3.9.0 - exit: 0.1.2 - graceful-fs: 4.2.11 - jest-changed-files: 29.7.0 - jest-config: 29.7.0(@types/node@22.10.5)(babel-plugin-macros@3.1.0)(ts-node@10.9.2(@swc/core@1.10.4(@swc/helpers@0.5.15))(@types/node@22.10.5)(typescript@5.7.3)) - jest-haste-map: 29.7.0 - jest-message-util: 29.7.0 - jest-regex-util: 29.6.3 - jest-resolve: 29.7.0 - jest-resolve-dependencies: 29.7.0 - jest-runner: 29.7.0 - jest-runtime: 29.7.0 - jest-snapshot: 29.7.0 - jest-util: 29.7.0 - jest-validate: 29.7.0 - jest-watcher: 29.7.0 - micromatch: 4.0.8 - pretty-format: 29.7.0 - slash: 3.0.0 - strip-ansi: 6.0.1 - transitivePeerDependencies: - - babel-plugin-macros - - supports-color - - ts-node - '@jest/core@29.7.0(babel-plugin-macros@3.1.0)(ts-node@10.9.2(@swc/core@1.10.4(@swc/helpers@0.5.15))(@types/node@22.10.5)(typescript@5.7.3))': dependencies: '@jest/console': 29.7.0 @@ -19990,6 +20714,303 @@ snapshots: micromark-util-character: 1.2.0 micromark-util-symbol: 1.1.0 + '@smithy/abort-controller@4.0.1': + dependencies: + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/config-resolver@4.0.1': + dependencies: + '@smithy/node-config-provider': 4.0.1 + '@smithy/types': 4.1.0 + '@smithy/util-config-provider': 4.0.0 + '@smithy/util-middleware': 4.0.1 + tslib: 2.8.1 + + '@smithy/core@3.1.0': + dependencies: + '@smithy/middleware-serde': 4.0.1 + 
'@smithy/protocol-http': 5.0.1 + '@smithy/types': 4.1.0 + '@smithy/util-body-length-browser': 4.0.0 + '@smithy/util-middleware': 4.0.1 + '@smithy/util-stream': 4.0.1 + '@smithy/util-utf8': 4.0.0 + tslib: 2.8.1 + + '@smithy/credential-provider-imds@4.0.1': + dependencies: + '@smithy/node-config-provider': 4.0.1 + '@smithy/property-provider': 4.0.1 + '@smithy/types': 4.1.0 + '@smithy/url-parser': 4.0.1 + tslib: 2.8.1 + + '@smithy/eventstream-codec@4.0.1': + dependencies: + '@aws-crypto/crc32': 5.2.0 + '@smithy/types': 4.1.0 + '@smithy/util-hex-encoding': 4.0.0 + tslib: 2.8.1 + + '@smithy/eventstream-serde-browser@4.0.1': + dependencies: + '@smithy/eventstream-serde-universal': 4.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/eventstream-serde-config-resolver@4.0.1': + dependencies: + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/eventstream-serde-node@4.0.1': + dependencies: + '@smithy/eventstream-serde-universal': 4.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/eventstream-serde-universal@4.0.1': + dependencies: + '@smithy/eventstream-codec': 4.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/fetch-http-handler@5.0.1': + dependencies: + '@smithy/protocol-http': 5.0.1 + '@smithy/querystring-builder': 4.0.1 + '@smithy/types': 4.1.0 + '@smithy/util-base64': 4.0.0 + tslib: 2.8.1 + + '@smithy/hash-node@4.0.1': + dependencies: + '@smithy/types': 4.1.0 + '@smithy/util-buffer-from': 4.0.0 + '@smithy/util-utf8': 4.0.0 + tslib: 2.8.1 + + '@smithy/invalid-dependency@4.0.1': + dependencies: + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/is-array-buffer@2.2.0': + dependencies: + tslib: 2.8.1 + + '@smithy/is-array-buffer@4.0.0': + dependencies: + tslib: 2.8.1 + + '@smithy/middleware-content-length@4.0.1': + dependencies: + '@smithy/protocol-http': 5.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/middleware-endpoint@4.0.1': + dependencies: + '@smithy/core': 3.1.0 + '@smithy/middleware-serde': 4.0.1 + '@smithy/node-config-provider': 
4.0.1 + '@smithy/shared-ini-file-loader': 4.0.1 + '@smithy/types': 4.1.0 + '@smithy/url-parser': 4.0.1 + '@smithy/util-middleware': 4.0.1 + tslib: 2.8.1 + + '@smithy/middleware-retry@4.0.1': + dependencies: + '@smithy/node-config-provider': 4.0.1 + '@smithy/protocol-http': 5.0.1 + '@smithy/service-error-classification': 4.0.1 + '@smithy/smithy-client': 4.1.0 + '@smithy/types': 4.1.0 + '@smithy/util-middleware': 4.0.1 + '@smithy/util-retry': 4.0.1 + tslib: 2.8.1 + uuid: 9.0.1 + + '@smithy/middleware-serde@4.0.1': + dependencies: + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/middleware-stack@4.0.1': + dependencies: + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/node-config-provider@4.0.1': + dependencies: + '@smithy/property-provider': 4.0.1 + '@smithy/shared-ini-file-loader': 4.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/node-http-handler@4.0.1': + dependencies: + '@smithy/abort-controller': 4.0.1 + '@smithy/protocol-http': 5.0.1 + '@smithy/querystring-builder': 4.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/property-provider@4.0.1': + dependencies: + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/protocol-http@5.0.1': + dependencies: + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/querystring-builder@4.0.1': + dependencies: + '@smithy/types': 4.1.0 + '@smithy/util-uri-escape': 4.0.0 + tslib: 2.8.1 + + '@smithy/querystring-parser@4.0.1': + dependencies: + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/service-error-classification@4.0.1': + dependencies: + '@smithy/types': 4.1.0 + + '@smithy/shared-ini-file-loader@4.0.1': + dependencies: + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/signature-v4@5.0.1': + dependencies: + '@smithy/is-array-buffer': 4.0.0 + '@smithy/protocol-http': 5.0.1 + '@smithy/types': 4.1.0 + '@smithy/util-hex-encoding': 4.0.0 + '@smithy/util-middleware': 4.0.1 + '@smithy/util-uri-escape': 4.0.0 + '@smithy/util-utf8': 4.0.0 + tslib: 2.8.1 + + '@smithy/smithy-client@4.1.0': + dependencies: + 
'@smithy/core': 3.1.0 + '@smithy/middleware-endpoint': 4.0.1 + '@smithy/middleware-stack': 4.0.1 + '@smithy/protocol-http': 5.0.1 + '@smithy/types': 4.1.0 + '@smithy/util-stream': 4.0.1 + tslib: 2.8.1 + + '@smithy/types@4.1.0': + dependencies: + tslib: 2.8.1 + + '@smithy/url-parser@4.0.1': + dependencies: + '@smithy/querystring-parser': 4.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/util-base64@4.0.0': + dependencies: + '@smithy/util-buffer-from': 4.0.0 + '@smithy/util-utf8': 4.0.0 + tslib: 2.8.1 + + '@smithy/util-body-length-browser@4.0.0': + dependencies: + tslib: 2.8.1 + + '@smithy/util-body-length-node@4.0.0': + dependencies: + tslib: 2.8.1 + + '@smithy/util-buffer-from@2.2.0': + dependencies: + '@smithy/is-array-buffer': 2.2.0 + tslib: 2.8.1 + + '@smithy/util-buffer-from@4.0.0': + dependencies: + '@smithy/is-array-buffer': 4.0.0 + tslib: 2.8.1 + + '@smithy/util-config-provider@4.0.0': + dependencies: + tslib: 2.8.1 + + '@smithy/util-defaults-mode-browser@4.0.1': + dependencies: + '@smithy/property-provider': 4.0.1 + '@smithy/smithy-client': 4.1.0 + '@smithy/types': 4.1.0 + bowser: 2.11.0 + tslib: 2.8.1 + + '@smithy/util-defaults-mode-node@4.0.1': + dependencies: + '@smithy/config-resolver': 4.0.1 + '@smithy/credential-provider-imds': 4.0.1 + '@smithy/node-config-provider': 4.0.1 + '@smithy/property-provider': 4.0.1 + '@smithy/smithy-client': 4.1.0 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/util-endpoints@3.0.1': + dependencies: + '@smithy/node-config-provider': 4.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/util-hex-encoding@4.0.0': + dependencies: + tslib: 2.8.1 + + '@smithy/util-middleware@4.0.1': + dependencies: + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/util-retry@4.0.1': + dependencies: + '@smithy/service-error-classification': 4.0.1 + '@smithy/types': 4.1.0 + tslib: 2.8.1 + + '@smithy/util-stream@4.0.1': + dependencies: + '@smithy/fetch-http-handler': 5.0.1 + '@smithy/node-http-handler': 4.0.1 + 
'@smithy/types': 4.1.0 + '@smithy/util-base64': 4.0.0 + '@smithy/util-buffer-from': 4.0.0 + '@smithy/util-hex-encoding': 4.0.0 + '@smithy/util-utf8': 4.0.0 + tslib: 2.8.1 + + '@smithy/util-uri-escape@4.0.0': + dependencies: + tslib: 2.8.1 + + '@smithy/util-utf8@2.3.0': + dependencies: + '@smithy/util-buffer-from': 2.2.0 + tslib: 2.8.1 + + '@smithy/util-utf8@4.0.0': + dependencies: + '@smithy/util-buffer-from': 4.0.0 + tslib: 2.8.1 + '@solana/buffer-layout-utils@0.2.0(bufferutil@4.0.9)(encoding@0.1.13)(utf-8-validate@5.0.10)': dependencies: '@solana/buffer-layout': 4.0.1 @@ -21167,6 +22188,8 @@ snapshots: '@types/uuid@8.3.4': {} + '@types/uuid@9.0.8': {} + '@types/wav-encoder@1.3.3': {} '@types/ws@7.4.7': @@ -22503,6 +23526,8 @@ snapshots: borsh@2.0.0: {} + bowser@2.11.0: {} + boxen@6.2.1: dependencies: ansi-align: 3.0.1 @@ -23209,7 +24234,7 @@ snapshots: chalk: 4.1.2 exit: 0.1.2 graceful-fs: 4.2.11 - jest-config: 29.7.0(@types/node@22.10.5)(babel-plugin-macros@3.1.0)(ts-node@10.9.2(@swc/core@1.10.4(@swc/helpers@0.5.15))(@types/node@22.10.5)(typescript@5.7.3)) + jest-config: 29.7.0(@types/node@22.10.5)(babel-plugin-macros@3.1.0)(ts-node@10.9.2(@swc/core@1.10.4(@swc/helpers@0.5.15))(@types/node@22.8.4)(typescript@5.6.3)) jest-util: 29.7.0 prompts: 2.4.2 transitivePeerDependencies: @@ -24742,6 +25767,10 @@ snapshots: fast-uri@3.0.5: {} + fast-xml-parser@4.4.1: + dependencies: + strnum: 1.0.5 + fastembed@1.14.1: dependencies: '@anush008/tokenizers': 0.0.0 @@ -26305,14 +27334,14 @@ snapshots: jest-cli@29.7.0(@types/node@22.10.5)(babel-plugin-macros@3.1.0): dependencies: - '@jest/core': 29.7.0(babel-plugin-macros@3.1.0) + '@jest/core': 29.7.0(babel-plugin-macros@3.1.0)(ts-node@10.9.2(@swc/core@1.10.4(@swc/helpers@0.5.15))(@types/node@22.8.4)(typescript@5.6.3)) '@jest/test-result': 29.7.0 '@jest/types': 29.6.3 chalk: 4.1.2 create-jest: 29.7.0(@types/node@22.10.5)(babel-plugin-macros@3.1.0) exit: 0.1.2 import-local: 3.2.0 - jest-config: 
29.7.0(@types/node@22.10.5)(babel-plugin-macros@3.1.0)(ts-node@10.9.2(@swc/core@1.10.4(@swc/helpers@0.5.15))(@types/node@22.10.5)(typescript@5.7.3)) + jest-config: 29.7.0(@types/node@22.10.5)(babel-plugin-macros@3.1.0)(ts-node@10.9.2(@swc/core@1.10.4(@swc/helpers@0.5.15))(@types/node@22.8.4)(typescript@5.6.3)) jest-util: 29.7.0 jest-validate: 29.7.0 yargs: 17.7.2 @@ -26676,7 +27705,7 @@ snapshots: jest@29.7.0(@types/node@22.10.5)(babel-plugin-macros@3.1.0): dependencies: - '@jest/core': 29.7.0(babel-plugin-macros@3.1.0) + '@jest/core': 29.7.0(babel-plugin-macros@3.1.0)(ts-node@10.9.2(@swc/core@1.10.4(@swc/helpers@0.5.15))(@types/node@22.8.4)(typescript@5.6.3)) '@jest/types': 29.6.3 import-local: 3.2.0 jest-cli: 29.7.0(@types/node@22.10.5)(babel-plugin-macros@3.1.0) @@ -31215,6 +32244,8 @@ snapshots: dependencies: acorn: 8.14.0 + strnum@1.0.5: {} + strong-log-transformer@2.1.0: dependencies: duplexer: 0.1.2 @@ -31628,12 +32659,12 @@ snapshots: babel-jest: 29.7.0(@babel/core@7.26.0) esbuild: 0.24.2 - ts-jest@29.2.5(@babel/core@7.26.0)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.26.0))(jest@29.7.0(@types/node@22.10.5)(babel-plugin-macros@3.1.0))(typescript@5.6.3): + ts-jest@29.2.5(@babel/core@7.26.0)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.26.0))(esbuild@0.24.2)(jest@29.7.0(@types/node@22.8.4)(babel-plugin-macros@3.1.0)(ts-node@10.9.2(@swc/core@1.10.4(@swc/helpers@0.5.15))(@types/node@22.8.4)(typescript@5.6.3)))(typescript@5.6.3): dependencies: bs-logger: 0.2.6 ejs: 3.1.10 fast-json-stable-stringify: 2.1.0 - jest: 29.7.0(@types/node@22.10.5)(babel-plugin-macros@3.1.0) + jest: 29.7.0(@types/node@22.8.4)(babel-plugin-macros@3.1.0)(ts-node@10.9.2(@swc/core@1.10.4(@swc/helpers@0.5.15))(@types/node@22.8.4)(typescript@5.6.3)) jest-util: 29.7.0 json5: 2.2.3 lodash.memoize: 4.1.2 @@ -31646,13 +32677,14 @@ snapshots: '@jest/transform': 29.7.0 '@jest/types': 29.6.3 babel-jest: 29.7.0(@babel/core@7.26.0) + 
esbuild: 0.24.2 - ts-jest@29.2.5(@babel/core@7.26.0)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.26.0))(jest@29.7.0(@types/node@22.8.4)(babel-plugin-macros@3.1.0)(ts-node@10.9.2(@swc/core@1.10.4(@swc/helpers@0.5.15))(@types/node@22.8.4)(typescript@5.6.3)))(typescript@5.6.3): + ts-jest@29.2.5(@babel/core@7.26.0)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.26.0))(jest@29.7.0(@types/node@22.10.5)(babel-plugin-macros@3.1.0))(typescript@5.6.3): dependencies: bs-logger: 0.2.6 ejs: 3.1.10 fast-json-stable-stringify: 2.1.0 - jest: 29.7.0(@types/node@22.8.4)(babel-plugin-macros@3.1.0)(ts-node@10.9.2(@swc/core@1.10.4(@swc/helpers@0.5.15))(@types/node@22.8.4)(typescript@5.6.3)) + jest: 29.7.0(@types/node@22.10.5)(babel-plugin-macros@3.1.0) jest-util: 29.7.0 json5: 2.2.3 lodash.memoize: 4.1.2