diff --git a/.env b/.env
new file mode 100644
index 000000000..fd0d6faa7
--- /dev/null
+++ b/.env
@@ -0,0 +1,4 @@
+API_KEY=eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6ImxzV0lDNWtkU1V1bXg1ckg5NkR6bFdYUGxJTSJ9.eyJhdWQiOiI4OTUyNTNhZi1lYzlkLTRiZTYtODNkMS02ZjI0OGU2NDRlNzkiLCJleHAiOjM3MDQyNTcwOTQsImlhdCI6MTc0NDcwNTA5NCwiaXNzIjoidHJ1ZWZvdW5kcnkuY29tIiwic3ViIjoiY205aThkNmxpMDc0NzAxbjYxZXF2OXV2dSIsImp0aSI6IjA2Mzg1NGEwLWIyMGEtNDA3MC1hODU1LThlODUyYzM1NzdkMCIsInN1YmplY3RTbHVnIjoidGVzdDE1MDQyNSIsInVzZXJuYW1lIjoidGVzdDE1MDQyNSIsInVzZXJUeXBlIjoic2VydmljZWFjY291bnQiLCJzdWJqZWN0VHlwZSI6InNlcnZpY2VhY2NvdW50IiwidGVuYW50TmFtZSI6InRydWVmb3VuZHJ5Iiwicm9sZXMiOlsidGVuYW50LWFkbWluIl0sImFwcGxpY2F0aW9uSWQiOiI4OTUyNTNhZi1lYzlkLTRiZTYtODNkMS02ZjI0OGU2NDRlNzkifQ.j_sucrB3HCfSCqyLESukXRfMjuIyz7JXXWCiiAxb5G6HlvAcPu1UoVRsmjVk2SLSPFlmy2olXJEkJvINLSAL2HeYlJ6PDlHRrW9fyXUXNXLxq9M4nBGLzU5fMh_qZa0Y_RqDdlXbatVbrqaaIinJHrmeuUphWNnbkZX3VlGWNGygh1V6WmLcx96E-aQ4OwGtj39cgL8geBcm6pJeUTxygf_saYL4o7olGJXHbVGbZSrPGTnE7Uoi1bGtLj_DRq_42CW53qL4I2R3aSzcXVtWOZyIfsvph3STWzH39VFA-Gei5qB2FMDU4m0BmtnhHarmJpcr-_GVs7fjd2R9J1rpIw
+SSL_VERIFY='false'
+API_BASE_URL=https://internal.devtest.truefoundry.tech/api/llm/api/inference/openai
+LLM_MODEL_NAME=openai-main/gpt-4o-mini
\ No newline at end of file
diff --git a/Chatbot.py b/Chatbot.py
index 0a4f2df45..a392c3ab8 100644
--- a/Chatbot.py
+++ b/Chatbot.py
@@ -1,14 +1,19 @@
-from openai import OpenAI
+import os
+
+import httpx
 import streamlit as st
+from openai import OpenAI
+
+SSL_VERIFY = os.getenv("SSL_VERIFY", "true")
+API_KEY = os.getenv("API_KEY")
+BASE_URL = os.getenv("API_BASE_URL", "https://api.openai.com/v1")
+LLM_MODEL_NAME = os.getenv("LLM_MODEL_NAME", "gpt-3.5-turbo")
 
 with st.sidebar:
-    openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
-    "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
-    "[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
-    "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
+    "[View the source code](https://github.com/truefoundry/rag-streamlit/blob/main/Chatbot.py)"
 
 st.title("💬 Chatbot")
-st.caption("🚀 A Streamlit chatbot powered by OpenAI")
+st.caption("🚀 A Streamlit chatbot powered by LLM Gateway")
 
 if "messages" not in st.session_state:
     st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
@@ -16,14 +21,19 @@
     st.chat_message(msg["role"]).write(msg["content"])
 
 if prompt := st.chat_input():
-    if not openai_api_key:
-        st.info("Please add your OpenAI API key to continue.")
-        st.stop()
-    client = OpenAI(api_key=openai_api_key)
+    client = OpenAI(
+        api_key=API_KEY,
+        base_url=BASE_URL,
+        http_client=httpx.Client(verify=SSL_VERIFY == "true"),
+    )
     st.session_state.messages.append({"role": "user", "content": prompt})
     st.chat_message("user").write(prompt)
-    response = client.chat.completions.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
+    response = client.chat.completions.create(
+        model=LLM_MODEL_NAME,
+        messages=st.session_state.messages,
+        extra_headers={"X-TFY-METADATA": '{"tfy_log_request":"true"}'},
+    )
     msg = response.choices[0].message.content
     st.session_state.messages.append({"role": "assistant", "content": msg})
     st.chat_message("assistant").write(msg)
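Note: nothing in this diff loads `.env` into the process environment. `os.getenv` only sees variables that are already exported, and Streamlit does not read `.env` files on its own, so the values must either be injected by the deployment environment or loaded explicitly at startup. A minimal sketch of that wiring, assuming the python-dotenv package (which this diff does not add to requirements):

# Hypothetical startup wiring; python-dotenv is an assumption, not part of this diff.
import os

from dotenv import load_dotenv

load_dotenv()  # copies key=value pairs from .env into os.environ

assert os.getenv("API_KEY"), "API_KEY is unset; export it or provide a .env file"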
diff --git a/pages/1_File_Q&A.py b/pages/1_File_Q&A.py
index 417474c4f..b86a3921b 100644
--- a/pages/1_File_Q&A.py
+++ b/pages/1_File_Q&A.py
@@ -1,12 +1,18 @@
+import os
+
+import httpx
 import streamlit as st
-import anthropic
+from openai import OpenAI
+
+API_KEY = os.getenv("API_KEY")
+BASE_URL = os.getenv("API_BASE_URL", "https://api.openai.com/v1")
+LLM_MODEL_NAME = os.getenv("LLM_MODEL_NAME", "gpt-3.5-turbo")
+SSL_VERIFY = os.getenv("SSL_VERIFY", "true")
 
 with st.sidebar:
-    anthropic_api_key = st.text_input("Anthropic API Key", key="file_qa_api_key", type="password")
-    "[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/1_File_Q%26A.py)"
-    "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
+    "[View the source code](https://github.com/truefoundry/rag-streamlit/blob/main/pages/1_File_Q%26A.py)"
 
-st.title("📝 File Q&A with Anthropic")
+st.title("📝 File Q&A with LLM Gateway")
 uploaded_file = st.file_uploader("Upload an article", type=("txt", "md"))
 question = st.text_input(
     "Ask something about the article",
@@ -14,20 +20,23 @@
     disabled=not uploaded_file,
 )
 
-if uploaded_file and question and not anthropic_api_key:
-    st.info("Please add your Anthropic API key to continue.")
-
-if uploaded_file and question and anthropic_api_key:
+if uploaded_file and question:
     article = uploaded_file.read().decode()
-    prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n<article>
-    {article}\n\n</article>\n\n{question}{anthropic.AI_PROMPT}"""
+    prompt = f"Here's an article:\n\n<article>\n{article}\n\n</article>\n\n{question}"
+
+    client = OpenAI(
+        api_key=API_KEY,
+        base_url=BASE_URL,
+        http_client=httpx.Client(verify=SSL_VERIFY == "true"),
+    )
 
-    client = anthropic.Client(api_key=anthropic_api_key)
-    response = client.completions.create(
-        prompt=prompt,
-        stop_sequences=[anthropic.HUMAN_PROMPT],
-        model="claude-v1",  # "claude-2" for Claude 2 model
-        max_tokens_to_sample=100,
+    response = client.chat.completions.create(
+        model=LLM_MODEL_NAME,
+        messages=[{"role": "user", "content": prompt}],
+        temperature=0.7,
+        stream=False,  # Make sure streaming is disabled
+        extra_headers={
+            "X-TFY-METADATA": '{"tfy_log_request":"true"}',
+        },
     )
-    st.write("### Answer")
-    st.write(response.completion)
+    st.write(response.choices[0].message.content)
diff --git a/pages/2_Chat_with_search.py b/pages/2_Chat_with_search.py
deleted file mode 100644
index 399c58219..000000000
--- a/pages/2_Chat_with_search.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import streamlit as st
-
-from langchain.agents import initialize_agent, AgentType
-from langchain.callbacks import StreamlitCallbackHandler
-from langchain.chat_models import ChatOpenAI
-from langchain.tools import DuckDuckGoSearchRun
-
-with st.sidebar:
-    openai_api_key = st.text_input(
-        "OpenAI API Key", key="langchain_search_api_key_openai", type="password"
-    )
-    "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
-    "[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/2_Chat_with_search.py)"
-    "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
-
-st.title("🔎 LangChain - Chat with search")
-
-"""
-In this example, we're using `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an interactive Streamlit app.
-Try more LangChain 🤝 Streamlit Agent examples at [github.com/langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent).
-"""
-
-if "messages" not in st.session_state:
-    st.session_state["messages"] = [
-        {"role": "assistant", "content": "Hi, I'm a chatbot who can search the web. How can I help you?"}
-    ]
-
-for msg in st.session_state.messages:
-    st.chat_message(msg["role"]).write(msg["content"])
-
-if prompt := st.chat_input(placeholder="Who won the Women's U.S. Open in 2018?"):
-    st.session_state.messages.append({"role": "user", "content": prompt})
-    st.chat_message("user").write(prompt)
-
-    if not openai_api_key:
-        st.info("Please add your OpenAI API key to continue.")
-        st.stop()
-
-    llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key=openai_api_key, streaming=True)
-    search = DuckDuckGoSearchRun(name="Search")
-    search_agent = initialize_agent(
-        [search], llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True
-    )
-    with st.chat_message("assistant"):
-        st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
-        response = search_agent.run(st.session_state.messages, callbacks=[st_cb])
-        st.session_state.messages.append({"role": "assistant", "content": response})
-        st.write(response)
diff --git a/pages/3_Langchain_Quickstart.py b/pages/3_Langchain_Quickstart.py
deleted file mode 100644
index 38c820f24..000000000
--- a/pages/3_Langchain_Quickstart.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import streamlit as st
-from langchain.llms import OpenAI
-
-st.title("🦜🔗 Langchain Quickstart App")
-
-with st.sidebar:
-    openai_api_key = st.text_input("OpenAI API Key", type="password")
-    "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
-
-
-def generate_response(input_text):
-    llm = OpenAI(temperature=0.7, openai_api_key=openai_api_key)
-    st.info(llm(input_text))
-
-
-with st.form("my_form"):
-    text = st.text_area("Enter text:", "What are 3 key advice for learning how to code?")
-    submitted = st.form_submit_button("Submit")
-    if not openai_api_key:
-        st.info("Please add your OpenAI API key to continue.")
-    elif submitted:
-        generate_response(text)
diff --git a/pages/4_Langchain_PromptTemplate.py b/pages/4_Langchain_PromptTemplate.py
deleted file mode 100644
index 3755419ea..000000000
--- a/pages/4_Langchain_PromptTemplate.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import streamlit as st
-from langchain.llms import OpenAI
-from langchain.prompts import PromptTemplate
-
-st.title("🦜🔗 Langchain - Blog Outline Generator App")
-
-openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password")
-
-
-def blog_outline(topic):
-    # Instantiate LLM model
-    llm = OpenAI(model_name="text-davinci-003", openai_api_key=openai_api_key)
-    # Prompt
-    template = "As an experienced data scientist and technical writer, generate an outline for a blog about {topic}."
-    prompt = PromptTemplate(input_variables=["topic"], template=template)
-    prompt_query = prompt.format(topic=topic)
-    # Run LLM model
-    response = llm(prompt_query)
-    # Print results
-    return st.info(response)
-
-
-with st.form("myform"):
-    topic_text = st.text_input("Enter prompt:", "")
-    submitted = st.form_submit_button("Submit")
-    if not openai_api_key:
-        st.info("Please add your OpenAI API key to continue.")
-    elif submitted:
-        blog_outline(topic_text)
diff --git a/pages/5_Chat_with_user_feedback.py b/pages/5_Chat_with_user_feedback.py
deleted file mode 100644
index 5f58f139c..000000000
--- a/pages/5_Chat_with_user_feedback.py
+++ /dev/null
@@ -1,65 +0,0 @@
-from openai import OpenAI
-import streamlit as st
-from streamlit_feedback import streamlit_feedback
-import trubrics
-
-with st.sidebar:
-    openai_api_key = st.text_input("OpenAI API Key", key="feedback_api_key", type="password")
-    "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
-    "[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/5_Chat_with_user_feedback.py)"
-    "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
-
-st.title("📝 Chat with feedback (Trubrics)")
-
-"""
-In this example, we're using [streamlit-feedback](https://github.com/trubrics/streamlit-feedback) and Trubrics to collect and store feedback
-from the user about the LLM responses.
-"""
-
-if "messages" not in st.session_state:
-    st.session_state.messages = [
-        {"role": "assistant", "content": "How can I help you? Leave feedback to help me improve!"}
-    ]
-if "response" not in st.session_state:
-    st.session_state["response"] = None
-
-messages = st.session_state.messages
-for msg in messages:
-    st.chat_message(msg["role"]).write(msg["content"])
-
-if prompt := st.chat_input(placeholder="Tell me a joke about sharks"):
-    messages.append({"role": "user", "content": prompt})
-    st.chat_message("user").write(prompt)
-
-    if not openai_api_key:
-        st.info("Please add your OpenAI API key to continue.")
-        st.stop()
-    client = OpenAI(api_key=openai_api_key)
-    response = client.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
-    st.session_state["response"] = response.choices[0].message.content
-    with st.chat_message("assistant"):
-        messages.append({"role": "assistant", "content": st.session_state["response"]})
-        st.write(st.session_state["response"])
-
-if st.session_state["response"]:
-    feedback = streamlit_feedback(
-        feedback_type="thumbs",
-        optional_text_label="[Optional] Please provide an explanation",
-        key=f"feedback_{len(messages)}",
-    )
-    # This app is logging feedback to Trubrics backend, but you can send it anywhere.
-    # The return value of streamlit_feedback() is just a dict.
-    # Configure your own account at https://trubrics.streamlit.app/
-    if feedback and "TRUBRICS_EMAIL" in st.secrets:
-        config = trubrics.init(
-            email=st.secrets.TRUBRICS_EMAIL,
-            password=st.secrets.TRUBRICS_PASSWORD,
-        )
-        collection = trubrics.collect(
-            component_name="default",
-            model="gpt",
-            response=feedback,
-            metadata={"chat": messages},
-        )
-        trubrics.save(config, collection)
-        st.toast("Feedback recorded!", icon="📝")
diff --git a/test.py b/test.py
new file mode 100644
index 000000000..1f1d12caa
--- /dev/null
+++ b/test.py
@@ -0,0 +1,19 @@
+import os
+
+from openai import OpenAI
+
+API_KEY = os.getenv("API_KEY")
+BASE_URL = os.getenv("API_BASE_URL", "https://api.openai.com/v1")
+LLM_MODEL_NAME = os.getenv("LLM_MODEL_NAME", "gpt-3.5-turbo")
+
+
+prompt = "Here's an article: what is this"
+
+client = OpenAI(api_key=API_KEY, base_url=BASE_URL)
+chat = client.chat.completions.create(
+    model=LLM_MODEL_NAME,
+    messages=[{"role": "user", "content": prompt}],
+    temperature=0.7,
+    stream=False,  # Make sure streaming is disabled
+)
+print(chat.choices[0].message.content)
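With this change the client construction (API key, base URL, `SSL_VERIFY` handling) is duplicated across `Chatbot.py`, `pages/1_File_Q&A.py`, and `test.py`, and the `SSL_VERIFY == "true"` comparison is case-sensitive. One possible follow-up is a small shared factory; the `gateway.py` module below is a hypothetical sketch, not part of this diff:

# gateway.py -- hypothetical shared helper, not introduced by this diff
import os

import httpx
from openai import OpenAI


def make_gateway_client() -> OpenAI:
    """Build an OpenAI client pointed at the gateway, honoring SSL_VERIFY case-insensitively."""
    verify = os.getenv("SSL_VERIFY", "true").strip().lower() not in ("false", "0", "no")
    return OpenAI(
        api_key=os.getenv("API_KEY"),
        base_url=os.getenv("API_BASE_URL", "https://api.openai.com/v1"),
        http_client=httpx.Client(verify=verify),
    )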