Python - Bedrock: Claude 3 Demo #6295

Merged 4 commits on Mar 27, 2024
@@ -0,0 +1,61 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os

import boto3

from utils.custom_logging import setup_custom_logger
from utils.timeit import timeit


class BedrockModelService:
"""
A class to interact with the Amazon Bedrock service, specifically for operations
related to foundation models.
"""

def __init__(self):
"""
Initializes the BedrockModelService with a boto3 client for Bedrock and a custom logger.
"""
self.bedrock_client = boto3.client(service_name="bedrock")
self.logger = setup_custom_logger(os.path.basename(__file__))

@timeit
def get_models(self):
"""
Fetches a list of foundation models from the Bedrock AI service and logs their IDs.

This method uses the `boto3` library to interact with the Bedrock AI service,
retrieving a list of foundation models. Each model's ID is then logged using
a custom logger. The method is decorated with `@timeit` to measure its execution time.

Exception Handling:
Catches and logs exceptions that may occur during the interaction with the Bedrock service.

Logging:
Logs the total number of models found and each model's ID at DEBUG level. If no models are found
or an exception occurs, appropriate warnings or errors are logged.
"""
try:
# Request a list of foundation models from Bedrock
model_list = self.bedrock_client.list_foundation_models()

# Extract model summaries from the response
model_summaries = model_list.get("modelSummaries")
if model_summaries is not None:
self.logger.info(f"Found models: {len(model_summaries)}")
# Log each model's ID
for model in model_summaries:
self.logger.debug(model["modelId"])
else:
self.logger.warning("No model summaries found in the Bedrock response.")
except Exception as e:
# Log any exceptions that occur during the process
self.logger.error(
f"Failed to retrieve models from Bedrock: {e}", exc_info=True
)


bedrock_service = BedrockModelService()
bedrock_service.get_models()
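Both this script and the next one import `setup_custom_logger` and `timeit` from a local `utils` package that is not part of this diff. As a rough sketch of what those helpers might look like (assuming `setup_custom_logger` returns a stdlib logger and `timeit` logs wall-clock duration; these are assumptions, not the PR's actual implementations):

```python
# Hypothetical stand-ins for utils.custom_logging and utils.timeit;
# the real helpers are not shown in this pull request.
import functools
import logging
import time


def setup_custom_logger(name: str) -> logging.Logger:
    """Return a logger that writes INFO-and-above records to stderr."""
    logger = logging.getLogger(name)
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
        )
        logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger


def timeit(func):
    """Log how long the wrapped callable took, even if it raised."""
    logger = setup_custom_logger("timeit")

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.perf_counter() - start
            logger.info("%s finished in %.2f s", func.__name__, elapsed)

    return wrapper
```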
@@ -0,0 +1,224 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os

import boto3

from utils.custom_logging import setup_custom_logger
from utils.timeit import timeit


class BedrockAIConverter:
"""
A class that provides methods for converting between images and text, and generating images from text,
using generative AI models hosted on Amazon Bedrock.
"""

def __init__(self):
"""
Initializes the BedrockAIConverter with a BedrockRuntime client and a custom logger.
"""
self.bedrock_runtime_client = boto3.client("bedrock-runtime")
self.logger = setup_custom_logger(os.path.basename(__file__))

    @timeit
    def img_to_txt(self):
        """
        Converts an image to text by sending the image to a generative AI model
        hosted on Bedrock. Reads an image file, encodes it in base64, and sends
        it to the model with a prompt. Logs and returns the model's response.

        Returns:
            str: The text generated by the model based on the image.

        Raises:
            Exception: If the process fails at any point.
        """
        try:
            import base64
            import json

            # Read the demo image and base64-encode it for the Claude 3 Messages API.
            with open("../data/resources/entry.jpeg", "rb") as image_file:
                base64_string = base64.b64encode(image_file.read()).decode("utf-8")

            prompt = """
            ######
            Describe the site in the photo with as much detail as you can.
            Do not mention an image, as it will confuse later prompting in this chain.
            Just provide a description of the site with no mention of any image.
            ######
            """
            self.logger.info(f"PROMPT:\n{prompt}\n")

            payload = {
                "modelId": "anthropic.claude-3-sonnet-20240229-v1:0",
                "contentType": "application/json",
                "accept": "application/json",
                "body": {
                    "anthropic_version": "bedrock-2023-05-31",
                    "max_tokens": 1000,
                    "messages": [
                        {
                            "role": "user",
                            "content": [
                                {
                                    "type": "image",
                                    "source": {
                                        "type": "base64",
                                        # The source file is a JPEG, so the media type must match.
                                        "media_type": "image/jpeg",
                                        "data": base64_string,
                                    },
                                },
                                {
                                    "type": "text",
                                    "text": prompt,
                                },
                            ],
                        }
                    ],
                },
            }

            body_bytes = json.dumps(payload["body"]).encode("utf-8")
            response = self.bedrock_runtime_client.invoke_model(
                body=body_bytes,
                contentType=payload["contentType"],
                accept=payload["accept"],
                modelId=payload["modelId"],
            )

            response_body = response["body"].read().decode("utf-8")
            data = json.loads(response_body)
            text = data["content"][0]["text"]
            self.logger.info(text)
            return text
        except Exception as e:
            self.logger.error(f"Failed to convert image to text: {e}")
            raise

    @timeit
    def txt_to_txt(self, text):
        """
        Invokes a text-to-text generative AI model to generate an idea based on the given text description.
        Logs and returns the model's generated text.

        Args:
            text (str): The input text description to base the generative model's response on.

        Returns:
            str: The text generated by the model.

        Raises:
            Exception: If the process fails at any point.
        """
        try:
            import json

            prompt = f"""
            ######
            You are a skilled event planner with a knack for coming
            up with low-cost high tech improvements to event spaces,
            particularly for tech conferences.

            Based on the following description, describe a single thrilling
            design improvement that would heighten the experience at the
            rvatech/ AI & Data Summit 2024, which is a day-long event
            where attendees will pass through this site in high
            volumes. Something small, nerdy, tech-oriented, and interactive.

            Update the starter description provided in such a way that
            your new improvement idea has been implemented. How would it look in the
            site description provided?

            Starter Description: {text}

            Updated description:
            ######
            """
            self.logger.info(f"PROMPT:\n{prompt}\n")

            response = self.bedrock_runtime_client.invoke_model(
                modelId="anthropic.claude-3-sonnet-20240229-v1:0",
                body=json.dumps(
                    {
                        "anthropic_version": "bedrock-2023-05-31",
                        "max_tokens": 1024,
                        "messages": [
                            {
                                "role": "user",
                                "content": [{"type": "text", "text": prompt}],
                            }
                        ],
                    }
                ),
            )

            result = json.loads(response.get("body").read())
            self.logger.debug(result)
            output_list = result.get("content", [])
            text = output_list[0]["text"]
            self.logger.info(text)
            return text
        except Exception as e:
            self.logger.error(f"Failed to generate text from text: {e}")
            raise

    @timeit
    def txt_to_img(self, text):
        """
        Converts given text to an image by sending the text to a generative AI model
        that produces images. Decodes and saves the response image.

        Args:
            text (str): The text description to convert to an image.

        Raises:
            Exception: If the process fails at any point.
        """
        try:
            import json
            from base64 import b64decode
            from io import BytesIO

            from PIL import Image

            body = json.dumps(
                {
                    "text_prompts": [{"text": text}],
                    "cfg_scale": 6,
                    "seed": 10,
                    "steps": 50,
                }
            )
            self.logger.info(f"PROMPT:\n{text}\n")
            modelId = "stability.stable-diffusion-xl"
            accept = "application/json"
            contentType = "application/json"
            response = self.bedrock_runtime_client.invoke_model(
                body=body, modelId=modelId, accept=accept, contentType=contentType
            )
            response = json.loads(response.get("body").read())
            images = response.get("artifacts")
            image = Image.open(BytesIO(b64decode(images[0].get("base64"))))
            image.save("2_try_claude3.png")
        except Exception as e:
            self.logger.error(f"Failed to convert text to image: {e}")
            raise


converter = BedrockAIConverter()
description = converter.img_to_txt()
design_idea = converter.txt_to_txt(description)
converter.txt_to_img(
f"""
#####
You are a whimsical artist with big ideas.

Generate a new detailed drawing showcasing the following site:
{design_idea}
######
"""
)
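For context on the response parsing in both Claude 3 calls above, the body returned by `invoke_model` for the Anthropic Messages API is JSON whose `content` field is a list of blocks. A trimmed sketch of the shape the demo relies on (the example text is made up, and fields the code does not read are omitted):

```python
# Approximate shape of the decoded invoke_model body that img_to_txt and
# txt_to_txt parse; only the fields the demo reads are shown.
example_body = {
    "content": [
        {"type": "text", "text": "A bright conference lobby with rows of tables..."}
    ],
    "stop_reason": "end_turn",
}

generated_text = example_body["content"][0]["text"]
```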
@@ -0,0 +1,24 @@
# Running a RAG-enhanced chatbot on Bedrock using Claude 3, Knowledge Base, and LangChain
This directory contains code used for a live demo. The scripts should be run sequentially, as follows (a minimal runner sketch appears after the list):
1. 0_get_models.py
2. 0_try_claude3.py
3. fordgpt_v1.py
4. fordgpt_v2.py
5. fordgpt_v3.py
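
A minimal runner sketch for the sequence above, assuming the scripts live in the current working directory and AWS credentials and a Bedrock-enabled region are already configured:

```python
# Hypothetical convenience runner; each script is also a standalone entry point.
import subprocess
import sys

SCRIPTS = [
    "0_get_models.py",
    "0_try_claude3.py",
    "fordgpt_v1.py",
    "fordgpt_v2.py",  # requires the AWS data stores noted below
    "fordgpt_v3.py",  # requires the AWS data stores noted below
]

for script in SCRIPTS:
    subprocess.run([sys.executable, script], check=True)
```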

Note: FordGPT v2 and v3 (steps 4 and 5) require AWS data stores containing information that is not included in this repository.

## Usage
This is reference code that should not be copied directly into production without review.

Running foundation models costs money. This demo presumes you have billing alarms configured and an understanding of Amazon Bedrock pricing.

## About FordGPT
![fordgpt.jpg](fordgpt.jpg)

## Requirements
FordGPT speaks fluently on:
* Popular companies
* Meetup groups & events
* Best coffee shops for coding
* Fail-safe job hunting resources