diff --git a/api/utils/llm_providers/__pycache__/anthropic.cpython-311.pyc b/api/utils/llm_providers/__pycache__/anthropic.cpython-311.pyc
index 9a20180..ee9332f 100644
Binary files a/api/utils/llm_providers/__pycache__/anthropic.cpython-311.pyc and b/api/utils/llm_providers/__pycache__/anthropic.cpython-311.pyc differ
diff --git a/api/utils/llm_providers/__pycache__/openai.cpython-311.pyc b/api/utils/llm_providers/__pycache__/openai.cpython-311.pyc
index db5e309..7b29d9a 100644
Binary files a/api/utils/llm_providers/__pycache__/openai.cpython-311.pyc and b/api/utils/llm_providers/__pycache__/openai.cpython-311.pyc differ
diff --git a/api/utils/llm_providers/anthropic.py b/api/utils/llm_providers/anthropic.py
index 20d6377..cbb7f93 100644
--- a/api/utils/llm_providers/anthropic.py
+++ b/api/utils/llm_providers/anthropic.py
@@ -1,9 +1,8 @@
-import asyncio
-from anthropic import Anthropic
+from anthropic import AsyncAnthropic
 from starlette.config import Config
 
 config = Config('.env')
-client = Anthropic(api_key=config("ANTHROPIC_API_KEY"))
+client = AsyncAnthropic(api_key=config("ANTHROPIC_API_KEY"))
 
 async def anthropic_generate_response(conversation):
     messages = [
@@ -11,30 +10,31 @@ async def anthropic_generate_response(conversation):
         for message in conversation.messages
     ]
 
-    with client.messages.stream(
+    stream = await client.messages.create(
         model=conversation.model.name,
         messages=messages,
         max_tokens=1024,
-    ) as stream:
-        for text in stream.text_stream:
-            yield text
+        stream=True,
+    )
+
+    async for event in stream:
+        if event.type == "content_block_delta":
+            content = event.delta.text
+            yield content
 
 
 async def generate_conversation_name(conversation):
     messages = [
         {"role": message.role, "content": message.content}
         for message in conversation.messages
+        if message.content.strip()  # Filter out messages with empty content
     ]
     messages.append({"role": "user", "content": "Please give a short, concise name for the above conversation."})
-
-    def sync_create_message():
-        response = client.messages.create(
-            model="claude-3-haiku-20240307",
-            system="You are a conversation namer. Give a short, concise name for the given conversation.",
-            messages=messages,
-            max_tokens=10,
-        )
-        return response
-
-    response = await asyncio.to_thread(sync_create_message)
-
+
+    response = await client.messages.create(
+        model="claude-3-haiku-20240307",
+        system="You are a conversation namer. Give a short, concise name for the given conversation.",
+        messages=messages,
+        max_tokens=10,
+    )
+    return response.content[0].text
\ No newline at end of file
diff --git a/api/utils/llm_providers/openai.py b/api/utils/llm_providers/openai.py
index 822f9a9..b427d6f 100644
--- a/api/utils/llm_providers/openai.py
+++ b/api/utils/llm_providers/openai.py
@@ -1,22 +1,23 @@
-from openai import OpenAI
+from openai import AsyncOpenAI
 from starlette.config import Config
 
 config = Config('.env')
-
-client = OpenAI(api_key=config("OPENAI_API_KEY"))
+client = AsyncOpenAI(api_key=config("OPENAI_API_KEY"))
 
 async def openai_generate_response(conversation):
-
     messages = [
         {"role": message.role, "content": message.content}
         for message in conversation.messages
     ]
 
-    response = client.chat.completions.create(model=conversation.model.name,
-                                               messages=messages,
-                                               stream=True)
+    stream = await client.chat.completions.create(
+        model=conversation.model.name,
+        messages=messages,
+        stream=True,
+    )
 
-    for chunk in response:
-        # Extract the content from the chunk
-        content = chunk.choices[0].delta.content
-        yield content
+    async for chunk in stream:
+        content = chunk.choices[0].delta.content
+        if content is None:
+            content = ""
+        yield content
\ No newline at end of file
diff --git a/app/components/ConversationMessages.tsx b/app/components/ConversationMessages.tsx
index c855db1..49757cd 100644
--- a/app/components/ConversationMessages.tsx
+++ b/app/components/ConversationMessages.tsx
@@ -35,13 +35,35 @@ const ConversationMessages: React.FC = ({
   userName = "User",
 }) => {
   const messagesEndRef = useRef(null);
+  // const messagesContainerRef = useRef(null);
 
   useEffect(() => {
     messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
   }, [messages.length]);
 
+  // useEffect(() => {
+  //   const messagesContainer = messagesContainerRef.current;
+  //   if (messagesContainer) {
+  //     const scrollHeight = messagesContainer.scrollHeight;
+  //     const scrollTop = messagesContainer.scrollTop;
+  //     const clientHeight = messagesContainer.clientHeight;
+  //     const scrollPosition = scrollTop + clientHeight;
+  //     const scrollThreshold = scrollHeight * 0.9; // Bottom 10% of the page
+
+  //     if (scrollPosition >= scrollThreshold) {
+  //       messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
+  //     }
+  //   }
+  // }, [messages]);
+
   return (
-
+
     {messages.map((message, index) => (
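After this change, both provider modules expose async generators built on AsyncAnthropic and AsyncOpenAI, so the calling code can iterate the streamed chunks without blocking the event loop. The snippet below is a minimal, hypothetical consumer showing that pattern; the SimpleNamespace conversation stub and the standalone script are illustrative assumptions (it presumes the repo root is on PYTHONPATH and ANTHROPIC_API_KEY is set in .env) and are not part of this diff.

```python
# Hypothetical driver for the async generator added in this diff.
# The conversation stub only mimics the attributes the provider reads:
# conversation.model.name and conversation.messages[*].role / .content.
import asyncio
from types import SimpleNamespace

from api.utils.llm_providers.anthropic import anthropic_generate_response


async def main():
    conversation = SimpleNamespace(
        model=SimpleNamespace(name="claude-3-haiku-20240307"),
        messages=[SimpleNamespace(role="user", content="Say hello in one word.")],
    )

    # Chunks are yielded as content_block_delta events arrive, so partial
    # text can be forwarded to the client (for example through a streaming
    # HTTP response) instead of waiting for the full completion.
    async for chunk in anthropic_generate_response(conversation):
        print(chunk, end="", flush=True)


asyncio.run(main())
```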