Amazon ElastiCache | Build a generative AI Virtual Assistant with Ama… #64

Open · wants to merge 3 commits into main
4 changes: 4 additions & 0 deletions webinars/genai-chatbot/.env.example
@@ -0,0 +1,4 @@
BWB_ENDPOINT_URL=https://bedrock-runtime.us-east-1.amazonaws.com
ELASTICACHE_ENDPOINT_URL=rediss://elasticache.serverless.use1.cache.amazonaws.com:6379
BWB_PROFILE_NAME=IF_YOU_NEED_TO_USE_AN_AWS_CLI_PROFILE_IT_GOES_HERE
BWB_REGION_NAME=REGION_NAME_GOES_HERE_IF_YOU_NEED_TO_OVERRIDE_THE_DEFAULT_REGION
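
For context, a minimal sketch (not part of this PR) of how these variables are typically consumed: python-dotenv loads .env into the process environment, and redis-py, which is pinned in requirements.lock, connects to the ElastiCache endpoint over TLS via the rediss:// scheme.

import os

import redis  # redis==5.0.8 is pinned in requirements.lock
from dotenv import load_dotenv  # python-dotenv==1.0.1 is pinned in requirements.lock

load_dotenv()  # copies the values from .env into os.environ

# Illustrative connectivity check; assumes the ElastiCache endpoint is reachable
redis_url = os.environ.get("ELASTICACHE_ENDPOINT_URL")
client = redis.Redis.from_url(redis_url)  # the rediss:// scheme enables TLS
client.ping()  # raises a ConnectionError if the endpoint cannot be reached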
1 change: 1 addition & 0 deletions webinars/genai-chatbot/.gitignore
@@ -1,3 +1,4 @@
.venv/*
.env.sh
__pycache__/*
.env
88 changes: 46 additions & 42 deletions webinars/genai-chatbot/chatbot_app.py
@@ -1,52 +1,56 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0


import os
import streamlit as st #all streamlit commands will be available through the "st" alias
import chatbot_lib as glib #reference to local lib script
import logging # Import the logging module for logging purposes
import streamlit as st # Import the Streamlit library for building interactive web apps
import chatbot_lib as glib # Import a custom module named 'chatbot_lib'
from dotenv import load_dotenv # Import the load_dotenv function from the 'dotenv' module

# Set up the logging configuration with the INFO level and a specific format
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


# Load environment variables from a '.env' file
load_dotenv()

st.set_page_config(page_title="Chatbot") #HTML title
st.title("Chatbot") #page title
# Add a login mechanism for the user
st.set_page_config(page_title="Chatbot") # Set the page title for the Streamlit app
st.title("Chatbot") # Display the title "Chatbot" on the Streamlit app

username = st.text_input("Enter your username:")
session_id = username
print("username being sent to session", session_id)
redis_url=os.environ.get("ELASTICACHE_ENDPOINT_URL")
key_prefix="chat_history:"
username = st.text_input("Enter your username:") # Get the username from the user input
session_id = username # Set the session_id to the username
redis_url = os.environ.get("ELASTICACHE_ENDPOINT_URL") # Get the Redis URL from the environment variables
key_prefix = "chat_history:" # Set a prefix for the chat history key

if username:
    st.session_state.username = username # Store the username in the session state
    st.info(f"Logged in as: {username}")

if 'memory' not in st.session_state: #see if the memory hasn't been created yet
    print(f"DEBUG: session_id: {session_id}")
    st.session_state.memory = glib.get_memory(session_id=session_id, url=redis_url, key_prefix=key_prefix) #initialize the memory

if 'chat_history' not in st.session_state: #see if the chat history hasn't been created yet
    st.session_state.chat_history = [] #initialize the chat history

print(f"DEBUG: session_id2: {session_id}")
st.session_state.memory = glib.get_memory(session_id=session_id, url=redis_url, key_prefix=key_prefix) #initialize the memory

#Re-render the chat history (Streamlit re-runs this script, so need this to preserve previous chat messages)
for message in st.session_state.chat_history: #loop through the chat history
    with st.chat_message(message["role"]): #renders a chat line for the given role, containing everything in the with block
        st.markdown(message["text"]) #display the chat content

input_text = st.chat_input("Chat with your bot here") #display a chat input box

if input_text: #run the code in this if block after the user submits a chat message
    with st.chat_message("user"): #display a user chat message
        st.markdown(input_text) #renders the user's latest message
    #Debugging: Print the input_text to the model
    print("Input text to the model:", input_text)
    st.session_state.chat_history.append({"role":"user", "text":input_text}) #append the user's latest message to the chat history
    #Debugging: Print the memory being sent to the model
    print("Memory to the Model:", st.session_state.memory)
    chat_response = glib.get_chat_response(input_text=input_text, memory=st.session_state.memory) #call the model through the supporting library
    with st.chat_message("assistant"): #display a bot chat message
        st.markdown(chat_response) #display bot's latest response
    st.session_state.chat_history.append({"role":"assistant", "text":chat_response}) #append the bot's latest message to the chat history
    st.info(f"Logged in as: {username}") # Display a message with the logged-in username

# If 'memory' is not in the session state, initialize it by calling the 'get_memory' function from 'chatbot_lib'
if 'memory' not in st.session_state:
    st.session_state.memory = glib.get_memory(session_id=session_id, url=redis_url, key_prefix=key_prefix)

# If 'chat_history' is not in the session state, initialize it as an empty list
if 'chat_history' not in st.session_state:
    st.session_state.chat_history = []

# Update the 'memory' in the session state by calling the 'get_memory' function from 'chatbot_lib'
st.session_state.memory = glib.get_memory(session_id=session_id, url=redis_url, key_prefix=key_prefix)

# Display the chat history by iterating over the messages and rendering them using Streamlit's chat_message
for message in st.session_state.chat_history:
    with st.chat_message(message["role"]):
        st.markdown(message["text"])

input_text = st.chat_input("Chat with your bot here") # Get the user's input text from the chat input

if input_text:
    with st.chat_message("user"):
        st.markdown(input_text) # Display the user's input text in the chat
    logging.info(f"Input text to the model: {input_text}") # Log the user's input text
    st.session_state.chat_history.append({"role": "user", "text": input_text}) # Add the user's input to the chat history
    logging.info(f"Memory to the Model: {st.session_state.memory}") # Log the memory passed to the model
    chat_response = glib.get_chat_response(input_text=input_text, memory=st.session_state.memory) # Get the chat response from the 'chatbot_lib' module
    with st.chat_message("assistant"):
        st.markdown(chat_response) # Display the chat response in the chat
    st.session_state.chat_history.append({"role": "assistant", "text": chat_response}) # Add the chat response to the chat history
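
Worth keeping in mind when reviewing the changes above: Streamlit re-runs the entire script on every interaction, which is why both the old and new versions keep memory and chat_history in st.session_state. A minimal, self-contained sketch of that pattern (hypothetical strings, not code from this PR):

import streamlit as st

if "chat_history" not in st.session_state:
    st.session_state.chat_history = []  # survives re-runs within one browser session

prompt = st.chat_input("Say something")  # returns None until the user submits text
if prompt:
    st.session_state.chat_history.append({"role": "user", "text": prompt})

for message in st.session_state.chat_history:  # re-rendered on every script run
    with st.chat_message(message["role"]):
        st.markdown(message["text"])

The app itself is started with streamlit run chatbot_app.py.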
97 changes: 68 additions & 29 deletions webinars/genai-chatbot/chatbot_lib.py
@@ -1,62 +1,101 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0


import os
import logging
from dotenv import load_dotenv
from langchain.memory import ConversationSummaryBufferMemory, RedisChatMessageHistory
from langchain.llms.bedrock import Bedrock
from langchain.chains import ConversationChain
from langchain.prompts.prompt import PromptTemplate

# Load environment variables from .env file
load_dotenv()

# Set up logging
logging.basicConfig(level=logging.INFO)

redis_url=os.environ.get("ELASTICACHE_ENDPOINT_URL")
# Get Redis URL from environment variable
redis_url = os.environ.get("ELASTICACHE_ENDPOINT_URL")


def get_llm():
    model_kwargs = {
    """
    Returns an instance of the Bedrock LLM with the specified model and configuration.
    Amazon Bedrock endpoints and quotas => https://docs.aws.amazon.com/general/latest/gr/bedrock.html

    Returns:
        Bedrock: An instance of the Bedrock LLM.
    """
    model_kwargs = {
        "max_tokens_to_sample": 8000,
        "temperature": 0,
        "top_k": 50,
        "top_p": 1,
        "stop_sequences": ["\n\nHuman:"]
        "temperature": 0.5,
        "top_k": 50,
        "top_p": 1,
        "stop_sequences": ["\n\nHuman:"]
    }
    # Amazon Bedrock endpoints and quotas => https://docs.aws.amazon.com/general/latest/gr/bedrock.html
    llm = Bedrock(
        credentials_profile_name=os.environ.get("BWB_PROFILE_NAME"), #sets the profile name to use for AWS credentials (if not the default)
        region_name=os.environ.get("BWB_REGION_NAME"), #sets the region name (if not the default)
        endpoint_url=os.environ.get("BWB_ENDPOINT_URL"), #sets the endpoint URL (if necessary)
        model_id="anthropic.claude-v2", #use the Anthropic Claude model
        model_kwargs=model_kwargs) #configure the properties for Claude
        credentials_profile_name=os.getenv("BWB_PROFILE_NAME"),
        region_name=os.getenv("BWB_REGION_NAME"),
        endpoint_url=os.getenv("BWB_ENDPOINT_URL"),
        model_id="anthropic.claude-v2",
        model_kwargs=model_kwargs)
    return llm


def get_chat_history():
    chat_history=RedisChatMessageHistory(session_id='username', url=redis_url, key_prefix="chat_history:")
    """
    Returns an instance of RedisChatMessageHistory for storing chat history.

    Returns:
        RedisChatMessageHistory: An instance of RedisChatMessageHistory.
    """
    chat_history = RedisChatMessageHistory(session_id='username', url=redis_url, key_prefix="chat_history:")
    return chat_history


def get_memory(session_id, url, key_prefix): # create memory for this chat session
def get_memory(session_id, url, key_prefix):
    """
    Creates and returns a ConversationSummaryBufferMemory instance for maintaining conversation history.

    Args:
        session_id (str): The session ID for the chat session.
        url (str): The URL of the Redis instance.
        key_prefix (str): The key prefix for storing chat history in Redis.

    Returns:
        ConversationSummaryBufferMemory: An instance of ConversationSummaryBufferMemory.
    """
    # ConversationSummaryBufferMemory requires an LLM for summarizing older messages
    # this allows us to maintain the "big picture" of a long-running conversation
    # This allows us to maintain the "big picture" of a long-running conversation
    llm = get_llm()
    chat_history=RedisChatMessageHistory(session_id=session_id, url=url, key_prefix=key_prefix)
    memory = ConversationSummaryBufferMemory(ai_prefix="AI Assistant",llm=llm, max_token_limit=1024, chat_memory=chat_history) #Maintains a summary of previous messages
    # memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=1024) # Maintains a summary of previous messages
    chat_history = RedisChatMessageHistory(session_id=session_id, url=url, key_prefix=key_prefix)
    memory = ConversationSummaryBufferMemory(ai_prefix="AI Assistant", llm=llm, max_token_limit=1024, chat_memory=chat_history)
    return memory


def get_chat_response(input_text, memory): #chat client function
def get_chat_response(input_text, memory):
    """
    Generates a chat response based on the input text and conversation history.

    Args:
        input_text (str): The input text from the user.
        memory (ConversationSummaryBufferMemory): The conversation history and summary.

    Returns:
        str: The chat response generated by the LLM.
    """
    llm = get_llm()
    template = """The following is a friendly conversation between a human and an AI.
    template = """The following is a friendly conversation between a human and an AI.
    AI provide very concise responses. If the AI does not know the answer to a question, it truthfully says it does not know.
    Current conversation:{history}. Human: {input} AI Assistant:"""
    PROMPT = PromptTemplate(input_variables=["history", "input"], template=template)
    conversation_with_summary = ConversationChain( #create a chat client
        llm = llm, #using the Bedrock LLM
        memory = memory, #with the summarization memory
    conversation_with_summary = ConversationChain(
        llm=llm, # using the Bedrock LLM
        memory=memory, # with the summarization memory
        prompt=PROMPT,
        verbose = True #print out some of the internal states of the chain while running
        verbose=False # disable printing internal states
    )
    chat_response = conversation_with_summary.predict(input=input_text) #pass the user message and summary to the model
    chat_response = conversation_with_summary.predict(input=input_text) # pass the user message and summary to the model
    logging.debug(f"Chat response: {chat_response}")
    return chat_response
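
A quick way to sanity-check the Redis-backed history in isolation; a hedged sketch assuming the pinned langchain version and a reachable endpoint in ELASTICACHE_ENDPOINT_URL:

import os

from dotenv import load_dotenv
from langchain.memory import RedisChatMessageHistory

load_dotenv()

history = RedisChatMessageHistory(
    session_id="alice",  # hypothetical user; the app passes the Streamlit username
    url=os.environ["ELASTICACHE_ENDPOINT_URL"],
    key_prefix="chat_history:",  # messages land under the Redis key "chat_history:alice"
)
history.add_user_message("Hello")
history.add_ai_message("Hi! How can I help?")
print(history.messages)  # round-trips both messages through ElastiCache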

81 changes: 81 additions & 0 deletions webinars/genai-chatbot/requirements.lock
@@ -0,0 +1,81 @@
aiohappyeyeballs==2.3.4
aiohttp==3.10.0
aiosignal==1.3.1
altair==5.3.0
annotated-types==0.7.0
anthropic==0.32.0
anyio==3.7.1
attrs==23.2.0
blinker==1.8.2
boto3==1.34.151
botocore==1.34.151
cachetools==5.4.0
certifi==2024.7.4
charset-normalizer==3.3.2
click==8.1.7
dataclasses-json==0.6.7
distro==1.9.0
filelock==3.15.4
frozenlist==1.4.1
fsspec==2024.6.1
gitdb==4.0.11
GitPython==3.1.43
greenlet==3.0.3
h11==0.14.0
hiredis==3.0.0
httpcore==1.0.5
httpx==0.27.0
huggingface-hub==0.24.5
idna==3.7
Jinja2==3.1.4
jiter==0.5.0
jmespath==1.0.1
jsonpatch==1.33
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2023.12.1
langchain==0.0.317
langsmith==0.0.51
markdown-it-py==3.0.0
MarkupSafe==2.1.5
marshmallow==3.21.3
mdurl==0.1.2
multidict==6.0.5
mypy-extensions==1.0.0
numpy==1.26.4
packaging==24.1
pandas==2.2.2
pillow==10.4.0
protobuf==5.27.3
pyarrow==17.0.0
pydantic==2.8.2
pydantic_core==2.20.1
pydeck==0.9.1
Pygments==2.18.0
python-dateutil==2.9.0.post0
python-dotenv==1.0.1
pytz==2024.1
PyYAML==6.0.1
redis==5.0.8
referencing==0.35.1
requests==2.32.3
rich==13.7.1
rpds-py==0.19.1
s3transfer==0.10.2
six==1.16.0
smmap==5.0.1
sniffio==1.3.1
SQLAlchemy==2.0.31
streamlit==1.37.0
tenacity==8.5.0
tokenizers==0.19.1
toml==0.10.2
toolz==0.12.1
tornado==6.4.1
tqdm==4.66.4
typing-inspect==0.9.0
typing_extensions==4.12.2
tzdata==2024.1
urllib3==2.2.2
watchdog==4.0.1
yarl==1.9.4