Merge pull request #93 from Daethyra/patches/imports
chore(pages): remove unused imports

- also removed CUDA requirement for `torch`
Daethyra authored Apr 24, 2024
2 parents da66e3a + 0874f70 commit 90c999d
Showing 6 changed files with 219 additions and 229 deletions.
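The `torch` dependency change noted in the commit message is not part of the diffs rendered below, so the following is only a rough sketch of what dropping the CUDA requirement means in practice: the code already picks a device at runtime (the same `torch.cuda.is_available()` fallback used in the now-commented `image_upscaler`), so a CPU-only `torch` build is sufficient.

    import torch

    # Minimal sketch, not taken from this commit: select CUDA when a GPU is
    # available, otherwise fall back to CPU, so a CPU-only torch install works.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    x = torch.ones(3, 3, device=device)
    print(f"tensor device: {x.device}")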
freestream/pages/1_🤖_RAGbot.py: 25 changes (7 additions, 18 deletions)
@@ -4,7 +4,6 @@
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI
from pages import (
@@ -98,23 +97,13 @@
# streaming=True,
# max_tokens=4096,
# ),
# "Claude: Opus": ChatAnthropic(
# model="claude-3-opus-20240229",
# anthropic_api_key=st.secrets.ANTHROPIC.anthropic_api_key,
# temperature=temperature_slider,
# streaming=True,
# max_tokens=4096,
# ),
# "Gemini-Pro": ChatGoogleGenerativeAI(
# model="gemini-pro",
# google_api_key=st.secrets.GOOGLE.google_api_key,
# temperature=temperature_slider,
# top_k=50,
# top_p=0.7,
# convert_system_message_to_human=True,
# max_output_tokens=4096,
# max_retries=1,
# ),
"Claude: Opus": ChatAnthropic(
model="claude-3-opus-20240229",
anthropic_api_key=st.secrets.ANTHROPIC.anthropic_api_key,
temperature=temperature_slider,
streaming=True,
max_tokens=4096,
),
}

# Create a dropdown menu for selecting a chat model
freestream/pages/2_💬_Curie.py: 11 changes (0 additions, 11 deletions)
@@ -7,7 +7,6 @@
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_openai import ChatOpenAI
from pages import (
PrintRetrievalHandler,
@@ -109,16 +108,6 @@
# streaming=True,
# max_tokens=4096,
# ),
# "Gemini-Pro": ChatGoogleGenerativeAI(
# model="gemini-pro",
# google_api_key=st.secrets.GOOGLE.google_api_key,
# temperature=temperature_slider,
# top_k=50,
# top_p=0.7,
# convert_system_message_to_human=True,
# max_output_tokens=4096,
# max_retries=1,
# ),
}

# Create a dropdown menu for selecting a chat model
freestream/pages/__init__.py: 2 changes (0 additions, 2 deletions)
@@ -4,8 +4,6 @@
StreamHandler,
footer,
set_llm,
image_upscaler,
upscale_model_weights,
set_bg_url,
set_bg_local,
)
freestream/pages/utils/utility_funcs.py: 133 changes (66 additions, 67 deletions)
@@ -16,7 +16,6 @@
from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_core.documents import Document
from PIL import Image
from RealESRGAN import RealESRGAN

# Set up logging
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
@@ -119,72 +118,72 @@ def set_llm(selected_model: str, model_names: dict):
st.error(f"Failed to change model! Error: {e}\n{selected_model}")


# Define a dictionary for Real-ESRGAN's model weights
upscale_model_weights = {
2: "weights/RealESRGAN_x2plus.pth",
4: "weights/RealESRGAN_x4plus.pth",
# 8: "weights/RealESRGAN_x8plus.pth",
}


# Define a function to upscale images using Real-ESRGAN
def image_upscaler(image: str, scale: int) -> Image:
"""
Upscales the input image using the specified model and returns the upscaled image.
Parameters:
image (str): The file path of the input image.
Returns:
Image: The upscaled image.
"""

# Assign the image to a variable
img = Image.open(image).convert("RGB")

# Initialize the upscaler
upscaler = RealESRGAN(
device="cuda" if torch.cuda.is_available() else "cpu", scale=scale
)

# Load the corresponding model weight
if scale in upscale_model_weights:
upscaler.load_weights(
upscale_model_weights[scale],
# Download the model weight if it doesn't exist
download=True,
)
else:
logger.error("Scale factor not in supported model weights.")

try:
# Capture start time
start_time = datetime.datetime.now()

with st.spinner(
f"Began upscaling: {datetime.datetime.now().strftime('%H:%M:%S')}..."
):
# Upscale the image
upscaled_img = upscaler.predict(img)

# Capture end time
end_time = datetime.datetime.now()

# Calculate and log the process duration
process_duration = end_time - start_time
logger.info(f"Upscale process took {process_duration.total_seconds()} seconds.")

# Notify the user
st.toast(
f"Success! Upscaling took {process_duration.total_seconds()} seconds.",
icon="😄",
)

except Exception as e:
logger.error(f"Failed to upscale image. Error: {e}")
st.error(f"Failed to upscale image! Please try again.")

return upscaled_img
# # Define a dictionary for Real-ESRGAN's model weights
# upscale_model_weights = {
# 2: "weights/RealESRGAN_x2plus.pth",
# 4: "weights/RealESRGAN_x4plus.pth",
# # 8: "weights/RealESRGAN_x8plus.pth",
# }


# # Define a function to upscale images using Real-ESRGAN
# def image_upscaler(image: str, scale: int) -> Image:
# """
# Upscales the input image using the specified model and returns the upscaled image.

# Parameters:
# image (str): The file path of the input image.

# Returns:
# Image: The upscaled image.
# """

# # Assign the image to a variable
# img = Image.open(image).convert("RGB")

# # Initialize the upscaler
# upscaler = RealESRGAN(
# device="cuda" if torch.cuda.is_available() else "cpu", scale=scale
# )

# # Load the corresponding model weight
# if scale in upscale_model_weights:
# upscaler.load_weights(
# upscale_model_weights[scale],
# # Download the model weight if it doesn't exist
# download=True,
# )
# else:
# logger.error("Scale factor not in supported model weights.")

# try:
# # Capture start time
# start_time = datetime.datetime.now()

# with st.spinner(
# f"Began upscaling: {datetime.datetime.now().strftime('%H:%M:%S')}..."
# ):
# # Upscale the image
# upscaled_img = upscaler.predict(img)

# # Capture end time
# end_time = datetime.datetime.now()

# # Calculate and log the process duration
# process_duration = end_time - start_time
# logger.info(f"Upscale process took {process_duration.total_seconds()} seconds.")

# # Notify the user
# st.toast(
# f"Success! Upscaling took {process_duration.total_seconds()} seconds.",
# icon="😄",
# )

# except Exception as e:
# logger.error(f"Failed to upscale image. Error: {e}")
# st.error(f"Failed to upscale image! Please try again.")

# return upscaled_img


# Define a function to change the background to an image via URL
