diff --git a/javascriptv3/example_code/bedrock-runtime/models/ai21_labs_jurassic2/jurassic2.js b/javascriptv3/example_code/bedrock-runtime/models/ai21_labs_jurassic2/jurassic2.js
index d1af560305a..7a741ce6e8a 100644
--- a/javascriptv3/example_code/bedrock-runtime/models/ai21_labs_jurassic2/jurassic2.js
+++ b/javascriptv3/example_code/bedrock-runtime/models/ai21_labs_jurassic2/jurassic2.js
@@ -26,7 +26,6 @@ import {
  *
  * @param {string} prompt - The input text prompt for the model to complete.
  * @param {string} [modelId] - The ID of the model to use. Defaults to "ai21.j2-mid-v1".
- * @returns {Promise} The inference response from the model.
  */
 export const invokeModel = async (prompt, modelId) => {
   // Create a new Bedrock Runtime client instance.
diff --git a/javascriptv3/example_code/bedrock-runtime/models/amazon_titan/titan_text.js b/javascriptv3/example_code/bedrock-runtime/models/amazon_titan/titan_text.js
index 2ad48b36685..9ad9f8ebba3 100644
--- a/javascriptv3/example_code/bedrock-runtime/models/amazon_titan/titan_text.js
+++ b/javascriptv3/example_code/bedrock-runtime/models/amazon_titan/titan_text.js
@@ -20,7 +20,6 @@ import {
  *
  * @param {string} prompt - The input text prompt for the model to complete.
  * @param {string} [modelId] - The ID of the model to use. Defaults to "amazon.titan-text-express-v1".
- * @returns {Promise} The inference response from the model.
  */
 export const invokeModel = async (prompt, modelId) => {
   // Create a new Bedrock Runtime client instance.
diff --git a/javascriptv3/example_code/bedrock-runtime/models/anthropic_claude/claude_2.js b/javascriptv3/example_code/bedrock-runtime/models/anthropic_claude/claude_2.js
index 21a44858cdb..ed2cc6d48c0 100644
--- a/javascriptv3/example_code/bedrock-runtime/models/anthropic_claude/claude_2.js
+++ b/javascriptv3/example_code/bedrock-runtime/models/anthropic_claude/claude_2.js
@@ -33,7 +33,6 @@ import {
  *
  * @param {string} prompt - The input text prompt for the model to complete.
  * @param {string} [modelId] - The ID of the model to use. Defaults to "anthropic.claude-v2".
- * @returns {Promise} The inference response from the model.
  */
 export const invokeMessagesApi = async (prompt, modelId) => {
   // Create a new Bedrock Runtime client instance.
@@ -80,7 +79,6 @@ export const invokeMessagesApi = async (prompt, modelId) => {
  *
  * @param {string} prompt - The input text prompt for the model to complete.
  * @param {string} [modelId] - The ID of the model to use. Defaults to "anthropic.claude-v2".
- * @returns {Promise} The inference response from the model.
  */
 export const invokeTextCompletionsApi = async (prompt, modelId) => {
   // Create a new Bedrock Runtime client instance.
diff --git a/javascriptv3/example_code/bedrock-runtime/models/anthropic_claude/claude_3.js b/javascriptv3/example_code/bedrock-runtime/models/anthropic_claude/claude_3.js
index dca9227a87f..0664c73df78 100644
--- a/javascriptv3/example_code/bedrock-runtime/models/anthropic_claude/claude_3.js
+++ b/javascriptv3/example_code/bedrock-runtime/models/anthropic_claude/claude_3.js
@@ -38,7 +38,6 @@ import {
  *
  * @param {string} prompt - The input text prompt for the model to complete.
  * @param {string} [modelId] - The ID of the model to use. Defaults to "anthropic.claude-3-haiku-20240307-v1:0".
- * @returns {Promise} The inference response from the model.
  */
 export const invokeModel = async (prompt, modelId) => {
   // Create a new Bedrock Runtime client instance.
@@ -85,7 +84,6 @@ export const invokeModel = async (prompt, modelId) => {
  *
  * @param {string} prompt - The input text prompt for the model to complete.
  * @param {string} [modelId] - The ID of the model to use. Defaults to "anthropic.claude-3-haiku-20240307-v1:0".
- * @returns {Promise} The final response from the model.
  */
 export const invokeModelWithResponseStream = async (prompt, modelId) => {
   // Create a new Bedrock Runtime client instance.
diff --git a/javascriptv3/example_code/bedrock-runtime/models/anthropic_claude/claude_instant_1.js b/javascriptv3/example_code/bedrock-runtime/models/anthropic_claude/claude_instant_1.js
index 349c2bb7977..4372964a595 100644
--- a/javascriptv3/example_code/bedrock-runtime/models/anthropic_claude/claude_instant_1.js
+++ b/javascriptv3/example_code/bedrock-runtime/models/anthropic_claude/claude_instant_1.js
@@ -31,7 +31,6 @@ import {
  *
  * @param {string} prompt - The input text prompt for the model to complete.
  * @param {string} [modelId] - The ID of the model to use. Defaults to "anthropic.claude-instant-v1".
- * @returns {Promise} The inference response from the model.
  */
 export const invokeMessagesApi = async (prompt, modelId) => {
   // Create a new Bedrock Runtime client instance.
@@ -78,7 +77,6 @@ export const invokeMessagesApi = async (prompt, modelId) => {
  *
  * @param {string} prompt - The input text prompt for the model to complete.
  * @param {string} [modelId] - The ID of the model to use. Defaults to "anthropic.claude-instant-v1".
- * @returns {Promise} The inference response from the model.
  */
 export const invokeTextCompletionsApi = async (prompt, modelId) => {
   // Create a new Bedrock Runtime client instance.
diff --git a/javascriptv3/example_code/bedrock-runtime/models/meta_llama2/llama2_chat.js b/javascriptv3/example_code/bedrock-runtime/models/meta_llama2/llama2_chat.js
index a9c7ec1e361..3cc9826c5b8 100644
--- a/javascriptv3/example_code/bedrock-runtime/models/meta_llama2/llama2_chat.js
+++ b/javascriptv3/example_code/bedrock-runtime/models/meta_llama2/llama2_chat.js
@@ -20,7 +20,6 @@ import {
  *
  * @param {string} prompt - The input text prompt for the model to complete.
  * @param {string} [modelId] - The ID of the model to use. Defaults to "meta.llama2-13b-chat-v1".
- * @returns {Promise} The inference response from the model.
  */
 export const invokeModel = async (prompt, modelId) => {
   // Create a new Bedrock Runtime client instance.
diff --git a/javascriptv3/example_code/bedrock-runtime/models/mistral_ai/mistral_7b.js b/javascriptv3/example_code/bedrock-runtime/models/mistral_ai/mistral_7b.js
index baa7eab31b5..31697399bcb 100644
--- a/javascriptv3/example_code/bedrock-runtime/models/mistral_ai/mistral_7b.js
+++ b/javascriptv3/example_code/bedrock-runtime/models/mistral_ai/mistral_7b.js
@@ -23,7 +23,6 @@ import {
  *
  * @param {string} prompt - The input text prompt for the model to complete.
  * @param {string} [modelId] - The ID of the model to use. Defaults to "mistral.mistral-7b-instruct-v0:2".
- * @returns {Promise} The inference response from the model.
  */
 export const invokeModel = async (prompt, modelId) => {
   // Create a new Bedrock Runtime client instance.
diff --git a/javascriptv3/example_code/bedrock-runtime/models/mistral_ai/mixtral_8x7b.js b/javascriptv3/example_code/bedrock-runtime/models/mistral_ai/mixtral_8x7b.js
index df62d9aa879..5de86408c10 100644
--- a/javascriptv3/example_code/bedrock-runtime/models/mistral_ai/mixtral_8x7b.js
+++ b/javascriptv3/example_code/bedrock-runtime/models/mistral_ai/mixtral_8x7b.js
@@ -23,7 +23,6 @@ import {
  *
  * @param {string} prompt - The input text prompt for the model to complete.
  * @param {string} [modelId] - The ID of the model to use. Defaults to "mistral.mixtral-8x7b-instruct-v0:1".
- * @returns {Promise} The inference response from the model.
  */
 export const invokeModel = async (prompt, modelId) => {
   // Create a new Bedrock Runtime client instance.
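Note: the patch above only removes @returns JSDoc tags; every helper keeps the same call shape. A minimal usage sketch for one of them (the import path, prompt, and file location are illustrative assumptions, not part of the diff; AWS credentials and Bedrock model access are assumed to be configured):

// usage_sketch.js (hypothetical caller, run from the bedrock-runtime example directory)
import { invokeModel } from "./models/ai21_labs_jurassic2/jurassic2.js";

// invokeModel is async; the resolved value carries the model's completion.
const completion = await invokeModel("Explain async/await in one sentence.");
console.log(completion);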