diff --git a/.doc_gen/metadata/bedrock-runtime_metadata.yaml b/.doc_gen/metadata/bedrock-runtime_metadata.yaml index 80f6fa1f8f2..6be6015d5c4 100644 --- a/.doc_gen/metadata/bedrock-runtime_metadata.yaml +++ b/.doc_gen/metadata/bedrock-runtime_metadata.yaml @@ -1,4 +1,21 @@ # zexi 0.4.0 +bedrock-runtime_Hello: + title: Hello &BR; + title_abbrev: Hello &BR; + synopsis: get started using &BR;. + category: Hello + languages: + Go: + versions: + - sdk_version: 2 + github: gov2/bedrock-runtime + excerpts: + - description: + snippet_tags: + - gov2.bedrock-runtime.Hello + services: + bedrock-runtime: {InvokeModel} + bedrock-runtime_InvokeModel: title: Invoke the specified &BR; model to run an inference title_abbrev: Invoke a model @@ -25,6 +42,14 @@ bedrock-runtime_InvokeClaude: synopsis: invoke the Anthropic Claude model on &BR; to run an inference. category: languages: + Go: + versions: + - sdk_version: 2 + github: gov2/bedrock-runtime + excerpts: + - description: Invoke the Anthropic Claude 2 foundation model. + snippet_tags: + - gov2.bedrock-runtime.InvokeClaude Java: versions: - sdk_version: 2 @@ -75,6 +100,14 @@ bedrock-runtime_InvokeJurassic2: synopsis: invoke the AI21 Labs Jurassic-2 model on &BR; to run an inference. category: languages: + Go: + versions: + - sdk_version: 2 + github: gov2/bedrock-runtime + excerpts: + - description: Invoke the AI21 Labs Jurassic-2 foundation model. + snippet_tags: + - gov2.bedrock-runtime.InvokeJurassic2 Java: versions: - sdk_version: 2 @@ -95,7 +128,6 @@ bedrock-runtime_InvokeJurassic2: versions: - sdk_version: 3 github: python/example_code/bedrockruntime - sdkguide: excerpts: - description: Invoke the AI 21 Labs Jurassic-2 foundation model. snippet_tags: @@ -126,12 +158,20 @@ bedrock-runtime_InvokeLlama2: synopsis: invoke the Meta Llama 2 Chat model on &BR; to run inference. 
category: languages: + Go: + versions: + - sdk_version: 2 + github: gov2/bedrock-runtime + excerpts: + - description: Invoke the Meta Llama 2 Chat foundation model. + snippet_tags: + - gov2.bedrock-runtime.InvokeLlama2 Java: versions: - sdk_version: 2 github: javav2/example_code/bedrock-runtime excerpts: - - description: Invoke the Meta Llama 2 foundation model. + - description: Invoke the Meta Llama 2 Chat foundation model. snippet_tags: - bedrock-runtime.java2.invoke_llama2.main PHP: @@ -139,16 +179,15 @@ bedrock-runtime_InvokeLlama2: - sdk_version: 3 github: php/example_code/bedrock-runtime excerpts: - - description: Invoke the Meta Llama 2 foundation model. + - description: Invoke the Meta Llama 2 Chat foundation model. snippet_tags: - php.example_code.bedrock-runtime.service.invokeLlama2 Python: versions: - sdk_version: 3 github: python/example_code/bedrockruntime - sdkguide: excerpts: - - description: Invoke the Meta Llama 2 foundation model. + - description: Invoke the Meta Llama 2 Chat foundation model. snippet_tags: - python.example_code.bedrock-runtime.InvokeMetaLlama2 services: @@ -213,18 +252,43 @@ bedrock-runtime_InvokeStableDiffusionAsync: services: bedrock-runtime: {InvokeModel} +bedrock-runtime_InvokeTitanImage: + title: Invoke Amazon Titan on &BR; to generate images + title_abbrev: Image generation with Amazon Titan + synopsis: invoke the Amazon Titan on &BR; to generate images. + category: + languages: + Go: + versions: + - sdk_version: 2 + github: gov2/bedrock-runtime + excerpts: + - description: Invoke the Amazon Titan image generation model. + snippet_tags: + - gov2.bedrock-runtime.InvokeTitanImage + services: + bedrock-runtime: {InvokeModel} + bedrock-runtime_InvokeModelWithResponseStream: title: Invoke Anthropic Claude on &BR; to run an inference with a response stream title_abbrev: Invoke Anthropic Claude on &BR; and process the response stream synopsis: invoke Anthropic Claude on &BR; to run an inference with a response stream. 
category: languages: + Go: + versions: + - sdk_version: 2 + github: gov2/bedrock-runtime + excerpts: + - description: Invoke Anthropic Claude and process the response stream. + snippet_tags: + - gov2.bedrock-runtime.InvokeModelWithResponseStream Java: versions: - sdk_version: 2 github: javav2/example_code/bedrock-runtime excerpts: - - description: Invoke Anthropic Claude on &BR; and process the response stream. + - description: Invoke Anthropic Claude and process the response stream. snippet_tags: - bedrock-runtime.java2.invoke_model_with_response_stream.main Python: @@ -232,7 +296,7 @@ bedrock-runtime_InvokeModelWithResponseStream: - sdk_version: 3 github: python/example_code/bedrock-runtime excerpts: - - description: Invoke Anthropic Claude on &BR; and process the response stream. + - description: Invoke Anthropic Claude and process the response stream. snippet_tags: - python.example_code.bedrock-runtime.InvokeModelWithResponseStream services: @@ -258,3 +322,26 @@ bedrock-runtime_Scenario_Invoke_models: - php.example_code.bedrock-runtime.basics.scenario services: bedrock-runtime: {InvokeModel} + +bedrock-runtime_Scenario_InvokeModelsInclResponseStream: + title: Invoke multiple foundation models on &BR; + title_abbrev: Invoke multiple foundation models on &BR; + synopsis: invoke multiple foundation models on &BR;. + synopsis_list: + - Generate text with Anthropic Claude. + - Generate text with AI21 Labs Jurassic-2. + - Generate text with Meta Llama 2 Chat. + - Asynchronously process the response stream from Anthropic Claude. + - Generate an image with the Amazon Titan Image Generator. + category: Scenarios + languages: + Go: + versions: + - sdk_version: 2 + github: gov2/bedrock-runtime + excerpts: + - description: Invoke multiple foundation models on &BR;. 
+ snippet_tags: + - gov2.bedrock-runtime.Scenario_InvokeModels + services: + bedrock-runtime: {InvokeModel, InvokeModelWithResponseStream} diff --git a/gov2/bedrock-runtime/.gitignore b/gov2/bedrock-runtime/.gitignore new file mode 100644 index 00000000000..7e902e850dc --- /dev/null +++ b/gov2/bedrock-runtime/.gitignore @@ -0,0 +1 @@ +output/* \ No newline at end of file diff --git a/gov2/bedrock-runtime/README.md b/gov2/bedrock-runtime/README.md new file mode 100644 index 00000000000..7d0d7b250f4 --- /dev/null +++ b/gov2/bedrock-runtime/README.md @@ -0,0 +1,133 @@ + +# Amazon Bedrock Runtime code examples for the SDK for Go V2 + +## Overview + +Shows how to use the AWS SDK for Go V2 to work with Amazon Bedrock Runtime. + + + + +*Amazon Bedrock Runtime is a fully managed service that makes it easy to use foundation models from third-party providers and Amazon.* + +## ⚠ Important + +* Running this code might result in charges to your AWS account. For more details, see [AWS Pricing](https://aws.amazon.com/pricing/?aws-products-pricing.sort-by=item.additionalFields.productNameLowercase&aws-products-pricing.sort-order=asc&awsf.Free%20Tier%20Type=*all&awsf.tech-category=*all) and [Free Tier](https://aws.amazon.com/free/?all-free-tier.sort-by=item.additionalFields.SortRank&all-free-tier.sort-order=asc&awsf.Free%20Tier%20Types=*all&awsf.Free%20Tier%20Categories=*all). +* Running the tests might result in charges to your AWS account. +* We recommend that you grant your code least privilege. At most, grant only the minimum permissions required to perform the task. For more information, see [Grant least privilege](https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege). +* This code is not tested in every AWS Region. For more information, see [AWS Regional Services](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services). 
+ + + + +## Code examples + +### Prerequisites + +For prerequisites, see the [README](../README.md#Prerequisites) in the `gov2` folder. + + + +> ⚠ You must request access to a model before you can use it. If you try to use the model (with the API or console) before you have requested access to it, you will receive an error message. For more information, see [Model access](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html). + + + +### Get started + +* [Hello Amazon Bedrock](hello/hello.go#L4) (`InvokeModel`) + +### Single actions + +Code excerpts that show you how to call individual service functions. + +* [Image generation with Amazon Titan](actions/invoke_model.go#L178) (`InvokeModel`) +* [Invoke AI21 Labs Jurassic-2 on Amazon Bedrock](actions/invoke_model.go#L78) (`InvokeModel`) +* [Invoke Anthropic Claude on Amazon Bedrock](actions/invoke_model.go#L27) (`InvokeModel`) +* [Invoke Anthropic Claude on Amazon Bedrock and process the response stream](actions/invoke_model_with_response_stream.go#L30) (`InvokeModelWithResponseStream`) +* [Invoke Meta Llama 2 on Amazon Bedrock](actions/invoke_model.go#L130) (`InvokeModel`) + +### Scenarios + +Code examples that show you how to accomplish a specific task by calling multiple +functions within the same service. + +* [Invoke multiple foundation models on Amazon Bedrock](scenarios/scenario_invoke_models.go) + +## Run the examples + +### Instructions + + + + +#### Region configuration +By default, examples are set to `us-east-1`. To specify a different region, use the `-region` flag as shown in this example: + +``` +go run ./hello -region=eu-central-1 +``` + +Be aware that not all regions may support Bedrock and its models yet. Verify service availability for your region [here](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/). For available models per region, refer to the [Bedrock dashboard](https://console.aws.amazon.com/bedrock) in the AWS Management Console. 
+ + +#### Hello Amazon Bedrock + +This example shows you how to get started using Amazon Bedrock. + +``` +go run ./hello +``` + +#### Run a scenario + +All scenarios can be run with the `cmd` runner. To get a list of scenarios +and to get help for running a scenario, use the following command: + +``` +go run ./cmd -h +``` + +#### Invoke multiple foundation models on Amazon Bedrock + +This example shows you how to invoke multiple foundation models on Amazon Bedrock. + +* Generate text with Anthropic Claude. +* Generate text with AI21 Labs Jurassic-2. +* Generate text with Meta Llama 2 Chat. +* Asynchronously process the response stream from Anthropic Claude. +* Generate an image with the Amazon Titan Image Generator. + + + + + + + + +### Tests + +⚠ Running tests might result in charges to your AWS account. + + +To find instructions for running these tests, see the [README](../README.md#Tests) +in the `gov2` folder. + + + + + + +## Additional resources + +* [Amazon Bedrock Runtime User Guide](https://docs.aws.amazon.com/bedrock/latest/userguide/what-is-bedrock.html) +* [Amazon Bedrock Runtime API Reference](https://docs.aws.amazon.com/bedrock/latest/APIReference/welcome.html) +* [SDK for Go V2 Amazon Bedrock Runtime reference](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/bedrock-runtime) + + + + +--- + +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 \ No newline at end of file diff --git a/gov2/bedrock-runtime/actions/invoke_model.go b/gov2/bedrock-runtime/actions/invoke_model.go new file mode 100644 index 00000000000..1726c8b20c7 --- /dev/null +++ b/gov2/bedrock-runtime/actions/invoke_model.go @@ -0,0 +1,259 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package actions + +import ( + "context" + "encoding/json" + "log" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" +) + +// snippet-start:[gov2.bedrock-runtime.InvokeModelWrapper.complete] +// snippet-start:[gov2.bedrock-runtime.InvokeModelWrapper.struct] + +// InvokeModelWrapper encapsulates Amazon Bedrock actions used in the examples. +// It contains a Bedrock Runtime client that is used to invoke foundation models. +type InvokeModelWrapper struct { + BedrockRuntimeClient *bedrockruntime.Client +} + +// snippet-end:[gov2.bedrock-runtime.InvokeModelWrapper.struct] + +// snippet-start:[gov2.bedrock-runtime.InvokeClaude] + +// Each model provider has their own individual request and response formats. +// For the format, ranges, and default values for Anthropic Claude, refer to: +// https://docs.anthropic.com/claude/reference/complete_post + +type ClaudeRequest struct { + Prompt string `json:"prompt"` + MaxTokensToSample int `json:"max_tokens_to_sample"` + Temperature float64 `json:"temperature,omitempty"` + StopSequences []string `json:"stop_sequences,omitempty"` +} + +type ClaudeResponse struct { + Completion string `json:"completion"` +} + +// Invokes Anthropic Claude on Amazon Bedrock to run an inference using the input +// provided in the request body. 
+func (wrapper InvokeModelWrapper) InvokeClaude(prompt string) (string, error) { + modelId := "anthropic.claude-v2" + + // Anthropic Claude requires enclosing the prompt as follows: + enclosedPrompt := "Human: " + prompt + "\n\nAssistant:" + + body, err := json.Marshal(ClaudeRequest { + Prompt: enclosedPrompt, + MaxTokensToSample: 200, + Temperature: 0.5, + StopSequences: []string{"\n\nHuman:"}, + }) + + if err != nil { log.Fatal("failed to marshal", err) } + + output, err := wrapper.BedrockRuntimeClient.InvokeModel(context.TODO(), &bedrockruntime.InvokeModelInput{ + ModelId: aws.String(modelId), + ContentType: aws.String("application/json"), + Body: body, + }) + + if err != nil { ProcessError(err, modelId) } + + var response ClaudeResponse + if err := json.Unmarshal(output.Body, &response); err != nil { + log.Fatal("failed to unmarshal", err) + } + + return response.Completion, nil +} +// snippet-end:[gov2.bedrock-runtime.InvokeClaude] + +// snippet-start:[gov2.bedrock-runtime.InvokeJurassic2] + +// Each model provider has their own individual request and response formats. +// For the format, ranges, and default values for AI21 Labs Jurassic-2, refer to: +// https://docs.ai21.com/reference/j2-complete-ref + +type Jurassic2Request struct { + Prompt string `json:"prompt"` + MaxTokens int `json:"maxTokens,omitempty"` + Temperature float64 `json:"temperature,omitempty"` +} + +type Jurassic2Response struct { + Completions []Completion `json:"completions"` +} +type Completion struct { + Data Data `json:"data"` +} +type Data struct { + Text string `json:"text"` +} + +// Invokes AI21 Labs Jurassic-2 on Amazon Bedrock to run an inference using the input +// provided in the request body. 
+func (wrapper InvokeModelWrapper) InvokeJurassic2(prompt string) (string, error) { + modelId := "ai21.j2-mid-v1" + + body, err := json.Marshal(Jurassic2Request { + Prompt: prompt, + MaxTokens: 200, + Temperature: 0.5, + }) + + if err != nil { log.Fatal("failed to marshal", err) } + + output, err := wrapper.BedrockRuntimeClient.InvokeModel(context.TODO(), &bedrockruntime.InvokeModelInput{ + ModelId: aws.String(modelId), + ContentType: aws.String("application/json"), + Body: body, + }) + + if err != nil { ProcessError(err, modelId) } + + var response Jurassic2Response + if err := json.Unmarshal(output.Body, &response); err != nil { + log.Fatal("failed to unmarshal", err) + } + + return response.Completions[0].Data.Text, nil +} +// snippet-end:[gov2.bedrock-runtime.InvokeJurassic2] + +// snippet-start:[gov2.bedrock-runtime.InvokeLlama2] + +// Each model provider has their own individual request and response formats. +// For the format, ranges, and default values for Meta Llama 2 Chat, refer to: +// https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-meta.html + +type Llama2Request struct { + Prompt string `json:"prompt"` + MaxGenLength int `json:"max_gen_len,omitempty"` + Temperature float64 `json:"temperature,omitempty"` +} + +type Llama2Response struct { + Generation string `json:"generation"` +} + +// Invokes Meta Llama 2 Chat on Amazon Bedrock to run an inference using the input +// provided in the request body. 
+func (wrapper InvokeModelWrapper) InvokeLlama2(prompt string) (string, error) { + modelId := "meta.llama2-13b-chat-v1" + + body, err := json.Marshal(Llama2Request { + Prompt: prompt, + MaxGenLength: 512, + Temperature: 0.5, + }) + + if err != nil { log.Fatal("failed to marshal", err) } + + output, err := wrapper.BedrockRuntimeClient.InvokeModel(context.TODO(), &bedrockruntime.InvokeModelInput{ + ModelId: aws.String(modelId), + ContentType: aws.String("application/json"), + Body: body, + }) + + if err != nil { ProcessError(err, modelId) } + + var response Llama2Response + if err := json.Unmarshal(output.Body, &response); err != nil { + log.Fatal("failed to unmarshal", err) + } + + + + return response.Generation, nil +} +// snippet-end:[gov2.bedrock-runtime.InvokeLlama2] + +// snippet-start:[gov2.bedrock-runtime.InvokeTitanImage] + +type TitanImageRequest struct { + TaskType string `json:"taskType"` + TextToImageParams TextToImageParams `json:"textToImageParams"` + ImageGenerationConfig ImageGenerationConfig `json:"imageGenerationConfig"` +} +type TextToImageParams struct { + Text string `json:"text"` +} +type ImageGenerationConfig struct { + NumberOfImages int `json:"numberOfImages"` + Quality string `json:"quality"` + CfgScale float64 `json:"cfgScale"` + Height int `json:"height"` + Width int `json:"width"` + Seed int64 `json:"seed"` +} + +type TitanImageResponse struct { + Images []string `json:"images"` +} + +// Invokes the Titan Image model to create an image using the input provided +// in the request body. 
+func (wrapper InvokeModelWrapper) InvokeTitanImage(prompt string, seed int64) (string, error) { + modelId := "amazon.titan-image-generator-v1" + + body, err := json.Marshal(TitanImageRequest { + TaskType: "TEXT_IMAGE", + TextToImageParams: TextToImageParams { + Text: prompt, + }, + ImageGenerationConfig: ImageGenerationConfig { + NumberOfImages: 1, + Quality: "standard", + CfgScale: 8.0, + Height: 512, + Width: 512, + Seed: seed, + }, + }) + + if err != nil { log.Fatal("failed to marshal", err) } + + output, err := wrapper.BedrockRuntimeClient.InvokeModel(context.TODO(), &bedrockruntime.InvokeModelInput{ + ModelId: aws.String(modelId), + ContentType: aws.String("application/json"), + Body: body, + }) + + if err != nil { ProcessError(err, modelId) } + + var response TitanImageResponse + if err := json.Unmarshal(output.Body, &response); err != nil { + log.Fatal("failed to unmarshal", err) + } + + base64ImageData := response.Images[0] + + return base64ImageData, nil + +} +// snippet-end:[gov2.bedrock-runtime.InvokeTitanImage] + +func ProcessError(err error, modelId string) { + errMsg := err.Error() + if strings.Contains(errMsg, "no such host") { + log.Printf(`The Bedrock service is not available in the selected region. + Please double-check the service availability for your region at + https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/.\n`) + } else if strings.Contains(errMsg, "Could not resolve the foundation model") { + log.Printf(`Could not resolve the foundation model from model identifier: \"%v\". + Please verify that the requested model exists and is accessible + within the specified region.\n + `, modelId) + } else { + log.Printf("Couldn't invoke model: \"%v\". 
Here's why: %v\n", modelId, err) + } +} + +// snippet-end:[gov2.bedrock-runtime.InvokeModelWrapper.complete] \ No newline at end of file diff --git a/gov2/bedrock-runtime/actions/invoke_model_test.go b/gov2/bedrock-runtime/actions/invoke_model_test.go new file mode 100644 index 00000000000..5a500f64db4 --- /dev/null +++ b/gov2/bedrock-runtime/actions/invoke_model_test.go @@ -0,0 +1,143 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +// Unit tests for the bedrock runtime actions. + +package actions + +import ( + "testing" + "encoding/json" + "log" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" + "github.com/awsdocs/aws-doc-sdk-examples/gov2/bedrock-runtime/stubs" + "github.com/awsdocs/aws-doc-sdk-examples/gov2/testtools" +) + +const CLAUDE_MODEL_ID = "anthropic.claude-v2" +const JURASSIC2_MODEL_ID = "ai21.j2-mid-v1" +const LLAMA2_MODEL_ID = "meta.llama2-13b-chat-v1" +const TITAN_IMAGE_MODEL_ID = "amazon.titan-image-generator-v1" + +const prompt = "A test prompt" + +func CallInvokeModelActions(sdkConfig aws.Config) { + defer func() { + if r := recover(); r != nil { + log.Println(r) + } + }() + + client := bedrockruntime.NewFromConfig(sdkConfig) + wrapper := InvokeModelWrapper{client} + + claudeCompletion, err := wrapper.InvokeClaude(prompt) + if err != nil {panic(err)} + log.Println(claudeCompletion) + + jurassic2Completion, err := wrapper.InvokeJurassic2(prompt) + if err != nil {panic(err)} + log.Println(jurassic2Completion) + + llama2Completion, err := wrapper.InvokeLlama2(prompt) + if err != nil {panic(err)} + log.Println(llama2Completion) + + seed := int64(0) + titanImageCompletion, err := wrapper.InvokeTitanImage(prompt, seed) + if err != nil {panic(err)} + log.Println(titanImageCompletion) + + log.Printf("Thanks for watching!") +} + +func TestInvokeModels(t *testing.T) { + scenTest := InvokeModelActionsTest{} + 
testtools.RunScenarioTests(&scenTest, t) +} + +type InvokeModelActionsTest struct {} + + +func (scenTest *InvokeModelActionsTest) SetupDataAndStubs() []testtools.Stub { + var stubList []testtools.Stub + stubList = append(stubList, stubInvokeModel(CLAUDE_MODEL_ID)) + stubList = append(stubList, stubInvokeModel(JURASSIC2_MODEL_ID)) + stubList = append(stubList, stubInvokeModel(LLAMA2_MODEL_ID)) + stubList = append(stubList, stubInvokeModel(TITAN_IMAGE_MODEL_ID)) + return stubList +} + +func (scenTest *InvokeModelActionsTest) RunSubTest(stubber *testtools.AwsmStubber) { + CallInvokeModelActions(*stubber.SdkConfig) +} + +func (scenTest *InvokeModelActionsTest) Cleanup() {} + +func stubInvokeModel(modelId string) (testtools.Stub) { + var request []byte + var response []byte + + switch modelId { + case CLAUDE_MODEL_ID: + request, _ = json.Marshal(ClaudeRequest{ + Prompt: "Human: " + prompt + "\n\nAssistant:", + MaxTokensToSample: 200, + Temperature: 0.5, + StopSequences: []string{"\n\nHuman:"}, + }) + response, _ = json.Marshal(ClaudeResponse{ + Completion: "A fake response", + }) + + case JURASSIC2_MODEL_ID: + request, _ = json.Marshal(Jurassic2Request{ + Prompt: prompt, + MaxTokens: 200, + Temperature: 0.5, + }) + response, _ = json.Marshal(Jurassic2Response{ + Completions: []Completion{ + { Data: Data{ Text: "A fake response", }, }, + }, + }) + + case LLAMA2_MODEL_ID: + request, _ = json.Marshal(Llama2Request{ + Prompt: prompt, + MaxGenLength: 512, + Temperature: 0.5, + }) + response, _ = json.Marshal(Llama2Response{ + Generation: "A fake response", + }) + + case TITAN_IMAGE_MODEL_ID: + request, _ = json.Marshal(TitanImageRequest{ + TaskType: "TEXT_IMAGE", + TextToImageParams: TextToImageParams{ + Text: prompt, + }, + ImageGenerationConfig: ImageGenerationConfig{ + NumberOfImages: 1, + Quality: "standard", + CfgScale: 8.0, + Height: 512, + Width: 512, + Seed: 0, + }, + }) + response, _ = json.Marshal(TitanImageResponse{ + Images: []string{"FakeBase64String=="}, + }) + 
+ default: + return testtools.Stub{} + } + + return stubs.StubInvokeModel(stubs.StubInvokeModelParams{ + request, response, modelId, nil, + }) +} diff --git a/gov2/bedrock-runtime/actions/invoke_model_with_response_stream.go b/gov2/bedrock-runtime/actions/invoke_model_with_response_stream.go new file mode 100644 index 00000000000..a9043bc612a --- /dev/null +++ b/gov2/bedrock-runtime/actions/invoke_model_with_response_stream.go @@ -0,0 +1,134 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package actions + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" + "github.com/aws/aws-sdk-go-v2/service/bedrockruntime/types" +) + +// snippet-start:[gov2.bedrock-runtime.InvokeModelWithResponseStreamWrapper.complete] +// snippet-start:[gov2.bedrock-runtime.InvokeModelWithResponseStreamWrapper.struct] + +// InvokeModelWithResponseStreamWrapper encapsulates Amazon Bedrock actions used in the examples. +// It contains a Bedrock Runtime client that is used to invoke foundation models. +type InvokeModelWithResponseStreamWrapper struct { + BedrockRuntimeClient *bedrockruntime.Client +} + +// snippet-end:[gov2.bedrock-runtime.InvokeModelWithResponseStreamWrapper.struct] + +// snippet-start:[gov2.bedrock-runtime.InvokeModelWithResponseStream] + +// Each model provider defines their own individual request and response formats. 
+// For the format, ranges, and default values for the different models, refer to: +// https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html + +type Request struct { + Prompt string `json:"prompt"` + MaxTokensToSample int `json:"max_tokens_to_sample"` + Temperature float64 `json:"temperature,omitempty"` +} + +type Response struct { + Completion string `json:"completion"` +} + +// Invokes Anthropic Claude on Amazon Bedrock to run an inference and asynchronously +// process the response stream. + +func (wrapper InvokeModelWithResponseStreamWrapper) InvokeModelWithResponseStream(prompt string) (string, error) { + + modelId := "anthropic.claude-v2" + + // Anthropic Claude requires you to enclose the prompt as follows: + prefix := "Human: " + postfix := "\n\nAssistant:" + prompt = prefix + prompt + postfix + + request := ClaudeRequest { + Prompt: prompt, + MaxTokensToSample: 200, + Temperature: 0.5, + StopSequences: []string{"\n\nHuman:"}, + } + + body, err := json.Marshal(request) + + output, err := wrapper.BedrockRuntimeClient.InvokeModelWithResponseStream(context.Background(), &bedrockruntime.InvokeModelWithResponseStreamInput{ + Body: body, + ModelId: aws.String(modelId), + ContentType: aws.String("application/json"), + }) + + if err != nil { + errMsg := err.Error() + if strings.Contains(errMsg, "no such host") { + log.Printf("The Bedrock service is not available in the selected region. Please double-check the service availability for your region at https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/.\n") + } else if strings.Contains(errMsg, "Could not resolve the foundation model") { + log.Printf("Could not resolve the foundation model from model identifier: \"%v\". Please verify that the requested model exists and is accessible within the specified region.\n", modelId) + } else { + log.Printf("Couldn't invoke Anthropic Claude. 
Here's why: %v\n", err) + } + } + + resp, err := processStreamingOutput(output, func(ctx context.Context, part []byte) error { + fmt.Print(string(part)) + return nil + }) + + if err != nil { + log.Fatal("streaming output processing error: ", err) + } + + return resp.Completion, nil + +} + +type StreamingOutputHandler func(ctx context.Context, part []byte) error + +func processStreamingOutput(output *bedrockruntime.InvokeModelWithResponseStreamOutput, handler StreamingOutputHandler) (Response, error) { + + var combinedResult string + resp := Response{} + + for event := range output.GetStream().Events() { + switch v := event.(type) { + case *types.ResponseStreamMemberChunk: + + //fmt.Println("payload", string(v.Value.Bytes)) + + var resp Response + err := json.NewDecoder(bytes.NewReader(v.Value.Bytes)).Decode(&resp) + if err != nil { + return resp, err + } + + handler(context.Background(), []byte(resp.Completion)) + combinedResult += resp.Completion + + case *types.UnknownUnionMember: + fmt.Println("unknown tag:", v.Tag) + + default: + fmt.Println("union is nil or unknown type") + } + } + + resp.Completion = combinedResult + + return resp, nil +} + +// snippet-end:[gov2.bedrock-runtime.InvokeModelWithResponseStream] + +// snippet-end:[gov2.bedrock-runtime.InvokeModelWithResponseStreamWrapper.complete] \ No newline at end of file diff --git a/gov2/bedrock-runtime/cmd/main.go b/gov2/bedrock-runtime/cmd/main.go new file mode 100644 index 00000000000..6aa67599b18 --- /dev/null +++ b/gov2/bedrock-runtime/cmd/main.go @@ -0,0 +1,62 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "context" + "flag" + "fmt" + "log" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/awsdocs/aws-doc-sdk-examples/gov2/demotools" + "github.com/awsdocs/aws-doc-sdk-examples/gov2/bedrock-runtime/scenarios" +) + +// main loads default AWS credentials and configuration from the ~/.aws folder and runs +// a scenario specified by the `-scenario` flag. +// +// `-scenario` can be one of the following: +// +// * `invokemodels` - Runs a scenario that shows how to invoke various image and text +// generation models on Amazon Bedrock. +func main() { + + scenarioMap := map[string]func(sdkConfig aws.Config){ + "invokemodels": runInvokeModelsScenario, + } + choices := make([]string, len(scenarioMap)) + choiceIndex := 0 + for choice := range scenarioMap { + choices[choiceIndex] = choice + choiceIndex++ + } + scenario := flag.String( + "scenario", "", + fmt.Sprintf("The scenario to run. Must be one of %v.", choices)) + + var region = flag.String("region", "us-east-1", "The AWS region") + flag.Parse() + + fmt.Printf("Using AWS region: %s\n", *region) + + if runScenario, ok := scenarioMap[*scenario]; !ok { + fmt.Printf("'%v' is not a valid scenario.\n", *scenario) + flag.Usage() + } else { + sdkConfig, err := config.LoadDefaultConfig(context.Background(), config.WithRegion(*region)) + if err != nil { + log.Fatalf("unable to load SDK config, %v", err) + } + + log.SetFlags(0) + runScenario(sdkConfig) + } +} + +func runInvokeModelsScenario(sdkConfig aws.Config) { + scenario := scenarios.NewInvokeModelsScenario(sdkConfig, demotools.NewQuestioner()) + scenario.Run() +} diff --git a/gov2/bedrock-runtime/go.mod b/gov2/bedrock-runtime/go.mod new file mode 100644 index 00000000000..456b48fb19e --- /dev/null +++ b/gov2/bedrock-runtime/go.mod @@ -0,0 +1,31 @@ +module github.com/awsdocs/aws-doc-sdk-examples/gov2/bedrock-runtime + +go 1.21 + +require ( + github.com/aws/aws-sdk-go-v2 
v1.22.2 + github.com/aws/aws-sdk-go-v2/config v1.23.0 + github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.3.1 + github.com/awsdocs/aws-doc-sdk-examples/gov2/demotools v0.0.0-20231116013656-9f08f276537a + github.com/awsdocs/aws-doc-sdk-examples/gov2/testtools v0.0.0-20231116013656-9f08f276537a +) + +require ( + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.15.2 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.6.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.17.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.25.1 // indirect + github.com/aws/smithy-go v1.16.0 // indirect + golang.org/x/sys v0.9.0 // indirect + golang.org/x/term v0.9.0 // indirect +) + +replace github.com/awsdocs/aws-doc-sdk-examples/gov2/bedrock-runtime/stubs => ./stubs + +replace github.com/awsdocs/aws-doc-sdk-examples/gov2/bedrock-runtime/scenarios => ./scenarios diff --git a/gov2/bedrock-runtime/go.sum b/gov2/bedrock-runtime/go.sum new file mode 100644 index 00000000000..d0be52d1a01 --- /dev/null +++ b/gov2/bedrock-runtime/go.sum @@ -0,0 +1,38 @@ +github.com/aws/aws-sdk-go-v2 v1.22.2 h1:lV0U8fnhAnPz8YcdmZVV60+tr6CakHzqA6P8T46ExJI= +github.com/aws/aws-sdk-go-v2 v1.22.2/go.mod h1:Kd0OJtkW3Q0M0lUWGszapWjEvrXDzRW+D21JNsroB+c= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0 h1:hHgLiIrTRtddC0AKcJr5s7i/hLgcpTt+q/FKxf1Zayk= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0/go.mod h1:w4I/v3NOWgD+qvs1NPEwhd++1h3XPHFaVxasfY6HlYQ= +github.com/aws/aws-sdk-go-v2/config v1.23.0 
h1:kqzEfGGDIrRJpfJckgwuZfFTbU9NB1jZnRcaO9MpOqE= +github.com/aws/aws-sdk-go-v2/config v1.23.0/go.mod h1:p7wbxKXXjS1GGQOss7VXOazVMFF9bjUGq85/4wR/fSw= +github.com/aws/aws-sdk-go-v2/credentials v1.15.2 h1:rKH7khRMxPdD0u3dHecd0Q7NOVw3EUe7AqdkUOkiOGI= +github.com/aws/aws-sdk-go-v2/credentials v1.15.2/go.mod h1:tXM8wmaeAhfC7nZoCxb0FzM/aRaB1m1WQ7x0qlBLq80= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3 h1:G5KawTAkyHH6WyKQCdHiW4h3PmAXNJpOgwKg3H7sDRE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3/go.mod h1:hugKmSFnZB+HgNI1sYGT14BUPZkO6alC/e0AWu+0IAQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2 h1:AaQsr5vvGR7rmeSWBtTCcw16tT9r51mWijuCQhzLnq8= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2/go.mod h1:o1IiRn7CWocIFTXJjGKJDOwxv1ibL53NpcvcqGWyRBA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2 h1:UZx8SXZ0YtzRiALzYAWcjb9Y9hZUR7MBKaBQ5ouOjPs= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2/go.mod h1:ipuRpcSaklmxR6C39G187TpBAO132gUfleTGccUPs8c= +github.com/aws/aws-sdk-go-v2/internal/ini v1.6.0 h1:hwZB07/beLiCopuRKF0t+dEHmP39iN4YtDh3X5d3hrg= +github.com/aws/aws-sdk-go-v2/internal/ini v1.6.0/go.mod h1:rdAuXeHWhI/zkpYcO5n8WCpaIgY9MUxFyBsuqq3kjyA= +github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.3.1 h1:0x8EYVwkE+fXxhsmjuO0rTrQtcik0whm3IFqB+ERxEk= +github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.3.1/go.mod h1:1GvO8YV08YNrHv2sweo8vWDh04+c6zSPqMcf+5q7TzI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2 h1:h7j73yuAVVjic8pqswh+L/7r2IHP43QwRyOu6zcCDDE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2/go.mod h1:H07AHdK5LSy8F7EJUQhoxyiCNkePoHj2D8P2yGTWafo= +github.com/aws/aws-sdk-go-v2/service/sso v1.17.1 h1:km+ZNjtLtpXYf42RdaDZnNHm9s7SYAuDGTafy6nd89A= +github.com/aws/aws-sdk-go-v2/service/sso v1.17.1/go.mod h1:aHBr3pvBSD5MbzOvQtYutyPLLRPbl/y9x86XyJJnUXQ= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1 h1:iRFNqZH4a67IqPvK8xxtyQYnyrlsvwmpHOe9r55ggBA= 
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1/go.mod h1:pTy5WM+6sNv2tB24JNKFtn6EvciQ5k40ZJ0pq/Iaxj0= +github.com/aws/aws-sdk-go-v2/service/sts v1.25.1 h1:txgVXIXWPXyqdiVn92BV6a/rgtpX31HYdsOYj0sVQQQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.25.1/go.mod h1:VAiJiNaoP1L89STFlEMgmHX1bKixY+FaP+TpRFrmyZ4= +github.com/aws/smithy-go v1.16.0 h1:gJZEH/Fqh+RsvlJ1Zt4tVAtV6bKkp3cC+R6FCZMNzik= +github.com/aws/smithy-go v1.16.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/awsdocs/aws-doc-sdk-examples/gov2/demotools v0.0.0-20231116013656-9f08f276537a h1:tgK0E4LiTS6exOgXmN1p1HQthDDOFcnsG+zZyYdzJ+o= +github.com/awsdocs/aws-doc-sdk-examples/gov2/demotools v0.0.0-20231116013656-9f08f276537a/go.mod h1:bcgBgQb+DUAGU6d0P3fokzUUhPz2pHhnsQ9Acj6aUcY= +github.com/awsdocs/aws-doc-sdk-examples/gov2/testtools v0.0.0-20231116013656-9f08f276537a h1:+CdtyLyRFidoakauxp1/KAyzPoJ7lpoRUiOHJdDcoWY= +github.com/awsdocs/aws-doc-sdk-examples/gov2/testtools v0.0.0-20231116013656-9f08f276537a/go.mod h1:qcs782jWmSQW2exwfKW39rOvOJBZ4xzO8dVLoFF62Sc= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= diff --git a/gov2/bedrock-runtime/hello/hello.go b/gov2/bedrock-runtime/hello/hello.go new file mode 100644 index 00000000000..243a30d6094 --- /dev/null +++ b/gov2/bedrock-runtime/hello/hello.go @@ -0,0 +1,101 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +// snippet-start:[gov2.bedrock-runtime.Hello] + +package main + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "log" + "os" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" +) + +// Each model provider defines their own individual request and response formats. +// For the format, ranges, and default values for the different models, refer to: +// https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html + +type ClaudeRequest struct { + Prompt string `json:"prompt"` + MaxTokensToSample int `json:"max_tokens_to_sample"` + // Omitting optional request parameters +} + +type ClaudeResponse struct { + Completion string `json:"completion"` +} + +// main uses the AWS SDK for Go (v2) to create an Amazon Bedrock Runtime client +// and invokes Anthropic Claude 2 inside your account and the chosen region. +// This example uses the default settings specified in your shared credentials +// and config files. +func main() { + + region := flag.String("region", "us-east-1", "The AWS region") + flag.Parse() + + fmt.Printf("Using AWS region: %s\n", *region) + + sdkConfig, err := config.LoadDefaultConfig(context.Background(), config.WithRegion(*region)) + if err != nil { + fmt.Println("Couldn't load default configuration. Have you set up your AWS account?") + fmt.Println(err) + return + } + + client := bedrockruntime.NewFromConfig(sdkConfig) + + modelId := "anthropic.claude-v2" + + prompt := "Hello, how are you today?" 
+ + // Anthropic Claude requires you to enclose the prompt as follows: + prefix := "Human: " + postfix := "\n\nAssistant:" + wrappedPrompt := prefix + prompt + postfix + + request := ClaudeRequest { + Prompt: wrappedPrompt, + MaxTokensToSample: 200, + } + + body, err := json.Marshal(request) + + result, err := client.InvokeModel(context.Background(), &bedrockruntime.InvokeModelInput { + ModelId: aws.String(modelId), + ContentType: aws.String("application/json"), + Body: body, + }) + + if err != nil { + errMsg := err.Error() + if strings.Contains(errMsg, "no such host") { + fmt.Printf("Error: The Bedrock service is not available in the selected region. Please double-check the service availability for your region at https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/.\n") + } else if strings.Contains(errMsg, "Could not resolve the foundation model") { + fmt.Printf("Error: Could not resolve the foundation model from model identifier: \"%v\". Please verify that the requested model exists and is accessible within the specified region.\n", modelId) + } else { + fmt.Printf("Error: Couldn't invoke Anthropic Claude. Here's why: %v\n", err) + } + os.Exit(1) + } + + var response ClaudeResponse + + err = json.Unmarshal(result.Body, &response) + + if err != nil { + log.Fatal("failed to unmarshal", err) + } + fmt.Println("Prompt:\n", prompt) + fmt.Println("Response from Anthropic Claude:\n", response.Completion) +} + +// snippet-end:[gov2.bedrock-runtime.Hello] \ No newline at end of file diff --git a/gov2/bedrock-runtime/scenarios/scenario_invoke_models.go b/gov2/bedrock-runtime/scenarios/scenario_invoke_models.go new file mode 100644 index 00000000000..74426e6b0d2 --- /dev/null +++ b/gov2/bedrock-runtime/scenarios/scenario_invoke_models.go @@ -0,0 +1,160 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package scenarios + +import ( + "encoding/base64" + "math/rand" + "path/filepath" + "fmt" + "log" + "strings" + "os" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" + "github.com/awsdocs/aws-doc-sdk-examples/gov2/demotools" + "github.com/awsdocs/aws-doc-sdk-examples/gov2/bedrock-runtime/actions" +) + +// snippet-start:[gov2.bedrock-runtime.Scenario_InvokeModels] + +// InvokeModelsScenario demonstrates how to use the Amazon Bedrock Runtime client +// to invoke various foundation models for text and image generation. +// +// 1. Generate text with Anthropic Claude 2 +// 2. Generate text with AI21 Labs Jurassic-2 +// 3. Generate text with Meta Llama 2 Chat +// 4. Generate text and asynchronously process the response stream with Anthropic Claude 2 +// 5. Generate an image with the Amazon Titan image generation model +type InvokeModelsScenario struct { + sdkConfig aws.Config + invokeModelWrapper actions.InvokeModelWrapper + responseStreamWrapper actions.InvokeModelWithResponseStreamWrapper + questioner demotools.IQuestioner + isTestRun bool +} + +// NewInvokeModelsScenario constructs an InvokeModelsScenario instance from a configuration. +// It uses the specified config to get a Bedrock Runtime client and create wrappers for the +// actions used in the scenario. +func NewInvokeModelsScenario(sdkConfig aws.Config, questioner demotools.IQuestioner) InvokeModelsScenario { + client := bedrockruntime.NewFromConfig(sdkConfig) + return InvokeModelsScenario{ + sdkConfig: sdkConfig, + invokeModelWrapper: actions.InvokeModelWrapper{client}, + responseStreamWrapper: actions.InvokeModelWithResponseStreamWrapper{client}, + questioner: questioner, + } +} + +// Runs the interactive scenario. 
+func (scenario InvokeModelsScenario) Run() { + defer func() { + if r := recover(); r != nil { + log.Printf("Something went wrong with the demo: %v\n", r) + } + }() + + + log.Println(strings.Repeat("=", 77)) + log.Println("Welcome to the Amazon Bedrock Runtime model invocation demo.") + log.Println(strings.Repeat("=", 77)) + + log.Printf("First, let's invoke a few large-language models using the synchronous client:\n\n") + + text2textPrompt := "In one paragraph, who are you?" + + log.Println(strings.Repeat("-", 77)) + log.Printf("Invoking Claude with prompt: %v\n", text2textPrompt) + scenario.InvokeClaude(text2textPrompt) + + log.Println(strings.Repeat("-", 77)) + log.Printf("Invoking Jurassic-2 with prompt: %v\n", text2textPrompt) + scenario.InvokeJurassic2(text2textPrompt) + + log.Println(strings.Repeat("-", 77)) + log.Printf("Invoking Llama2 with prompt: %v\n", text2textPrompt) + scenario.InvokeLlama2(text2textPrompt) + + log.Println(strings.Repeat("=", 77)) + log.Printf("Now, let's invoke Claude with the asynchronous client and process the response stream:\n\n") + + log.Println(strings.Repeat("-", 77)) + log.Printf("Invoking Claude with prompt: %v\n", text2textPrompt) + scenario.InvokeWithResponseStream(text2textPrompt) + + log.Println(strings.Repeat("=", 77)) + log.Printf("Now, let's create an image with the Amazon Titan image generation model:\n\n") + + text2ImagePrompt := "stylized picture of a cute old steampunk robot" + seed := rand.Int63n(2147483648) + + log.Println(strings.Repeat("-", 77)) + log.Printf("Invoking Amazon Titan with prompt: %v\n", text2ImagePrompt) + scenario.InvokeTitanImage(text2ImagePrompt, seed) + + log.Println(strings.Repeat("=", 77)) + log.Println("Thanks for watching!") + log.Println(strings.Repeat("=", 77)) +} + +func (scenario InvokeModelsScenario) InvokeClaude(prompt string) { + completion, err := scenario.invokeModelWrapper.InvokeClaude(prompt) + if err != nil { panic(err) } + log.Printf("\nClaude : %v\n", 
strings.TrimSpace(completion)) +} + +func (scenario InvokeModelsScenario) InvokeJurassic2(prompt string) { + completion, err := scenario.invokeModelWrapper.InvokeJurassic2(prompt) + if err != nil { panic(err) } + log.Printf("\nJurassic-2 : %v\n", strings.TrimSpace(completion)) +} + +func (scenario InvokeModelsScenario) InvokeLlama2(prompt string) { + completion, err := scenario.invokeModelWrapper.InvokeLlama2(prompt) + if err != nil { panic(err) } + log.Printf("\nLlama 2 : %v\n\n", strings.TrimSpace(completion)) +} + +func (scenario InvokeModelsScenario) InvokeWithResponseStream(prompt string) { + log.Println("\nClaude with response stream:") + _, err := scenario.responseStreamWrapper.InvokeModelWithResponseStream(prompt) + if err != nil { panic(err) } + log.Println() +} + +func (scenario InvokeModelsScenario) InvokeTitanImage(prompt string, seed int64) { + base64ImageData, err := scenario.invokeModelWrapper.InvokeTitanImage(prompt, seed) + if err != nil { panic(err) } + imagePath := saveImage(base64ImageData, "amazon.titan-image-generator-v1") + fmt.Printf("The generated image has been saved to %s\n", imagePath) +} + +// snippet-end:[gov2.bedrock-runtime.Scenario_InvokeModels] + +func saveImage(base64ImageData string, modelId string) string { + outputDir := "output" + + if _, err := os.Stat(outputDir); os.IsNotExist(err) { + os.MkdirAll(outputDir, 0755) + } + + i := 1 + for { + if _, err := os.Stat(filepath.Join(outputDir, fmt.Sprintf("%s_%d.png", modelId, i))); os.IsNotExist(err) { + break + } + i++ + } + + imageData, _ := base64.StdEncoding.DecodeString(base64ImageData) + + filePath := filepath.Join(outputDir, fmt.Sprintf("%s_%d.png", modelId, i)) + f, _ := os.Create(filePath) + f.Write(imageData) + f.Close() + + return filePath +} \ No newline at end of file diff --git a/gov2/bedrock-runtime/stubs/invoke_model_stubs.go b/gov2/bedrock-runtime/stubs/invoke_model_stubs.go new file mode 100644 index 00000000000..ea07ccf5923 --- /dev/null +++ 
b/gov2/bedrock-runtime/stubs/invoke_model_stubs.go @@ -0,0 +1,58 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +// Defines stubs used for unit testing the Bedrock Runtime actions. + +package stubs + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" + "github.com/awsdocs/aws-doc-sdk-examples/gov2/testtools" +) + +type StubInvokeModelParams struct { + Request []byte + Response []byte + ModelId string + RaiseErr *testtools.StubError +} + +func StubInvokeModel(params StubInvokeModelParams) testtools.Stub { + return testtools.Stub{ + OperationName: "InvokeModel", + Input: &bedrockruntime.InvokeModelInput{ + Body: params.Request, + ModelId: aws.String(params.ModelId), + ContentType: aws.String("application/json"), + }, + Output: &bedrockruntime.InvokeModelOutput{ Body: params.Response, }, + Error: params.RaiseErr, + } +} + +// func StubInvokeTitanImage(requestBytes []byte, raiseErr *testtools.StubError) testtools.Stub { +// fakeTitanImageResponse := actions.TitanImageResponse{ +// Images: []string { +// "FakeBase64String==", +// }, +// } +// +// responseBytes, err := json.Marshal(fakeTitanImageResponse) +// if err != nil { +// panic(err) +// } +// +// return testtools.Stub{ +// OperationName: "InvokeModel", +// Input: &bedrockruntime.InvokeModelInput{ +// Body: requestBytes, +// ModelId: aws.String("amazon.titan-image-generator-v1"), +// ContentType: aws.String("application/json"), +// }, +// Output: &bedrockruntime.InvokeModelOutput{ +// Body: responseBytes, +// }, +// Error: raiseErr, +// } +// } \ No newline at end of file diff --git a/php/example_code/bedrock-runtime/README.md b/php/example_code/bedrock-runtime/README.md index 405e18fd282..6c40ecef5ef 100644 --- a/php/example_code/bedrock-runtime/README.md +++ b/php/example_code/bedrock-runtime/README.md @@ -1,4 +1,4 @@ - + # Amazon Bedrock Runtime code examples for the SDK for PHP ## Overview @@ 
-41,9 +41,9 @@ For prerequisites, see the [README](../../README.md#Prerequisites) in the `php` Code excerpts that show you how to call individual service functions. -* [Invoke AI21 Labs Jurassic-2 on Amazon Bedrock](BedrockRuntimeService.php#L72) (`InvokeModel`) -* [Invoke Anthropic Claude 2 on Amazon Bedrock](BedrockRuntimeService.php#L33) (`InvokeModel`) -* [Invoke Meta Llama 2 on Amazon Bedrock](BedrockRuntimeService.php#L107) (`InvokeModel`) +* [Invoke AI21 Labs Jurassic-2 on Amazon Bedrock](BedrockRuntimeService.php#L71) (`InvokeModel`) +* [Invoke Anthropic Claude on Amazon Bedrock](BedrockRuntimeService.php#L33) (`InvokeModel`) +* [Invoke Meta Llama 2 on Amazon Bedrock](BedrockRuntimeService.php#L105) (`InvokeModel`) ### Scenarios @@ -67,7 +67,7 @@ functions within the same service. This example shows you how to invoke multiple large-language-models (LLMs) on Amazon Bedrock. -* Generate text with Anthropic Claude 2. +* Generate text with Anthropic Claude. * Generate text with AI21 Labs Jurassic-2. * Generate text with Meta Llama 2 Chat. diff --git a/python/example_code/bedrock-runtime/README.md b/python/example_code/bedrock-runtime/README.md index 1f34914a902..6938c220ebc 100644 --- a/python/example_code/bedrock-runtime/README.md +++ b/python/example_code/bedrock-runtime/README.md @@ -1,4 +1,4 @@ - + # Amazon Bedrock Runtime code examples for the SDK for Python ## Overview @@ -41,10 +41,10 @@ python -m pip install -r requirements.txt Code excerpts that show you how to call individual service functions. 
* [Invoke AI21 Labs Jurassic-2 on Amazon Bedrock](bedrock_runtime_wrapper.py#L79) (`InvokeModel`) -* [Invoke Anthropic Claude 2 on Amazon Bedrock](bedrock_runtime_wrapper.py#L39) (`InvokeModel`) +* [Invoke Anthropic Claude on Amazon Bedrock](bedrock_runtime_wrapper.py#L39) (`InvokeModel`) +* [Invoke Anthropic Claude on Amazon Bedrock and process the response stream](bedrock_runtime_wrapper.py#L195) (`InvokeModelWithResponseStream`) * [Invoke Meta Llama 2 on Amazon Bedrock](bedrock_runtime_wrapper.py#L115) (`InvokeModel`) * [Invoke Stability.ai Stable Diffusion XL on Amazon Bedrock](bedrock_runtime_wrapper.py#L152) (`InvokeModel`) -* [Invoke a model on Amazon Bedrock with a response stream](bedrock_runtime_wrapper.py#L195) (`InvokeModelWithResponseStream`) ## Run the examples