Skip to content

add llm gateway support #2

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 9 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions .env
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# SECURITY: a live service-account JWT with the "tenant-admin" role was committed here.
# Redacted in review — rotate/revoke the exposed token immediately and load the key from a
# secret manager or an untracked local .env (add .env to .gitignore); never commit credentials.
API_KEY=<redacted-rotate-this-credential>
# SECURITY NOTE(review): SSL_VERIFY='false' disables TLS certificate verification, exposing
# traffic to man-in-the-middle attacks. Prefer trusting the internal CA bundle instead.
SSL_VERIFY='false'
API_BASE_URL=https://internal.devtest.truefoundry.tech/api/llm/api/inference/openai
LLM_MODEL_NAME=openai-main/gpt-4o-mini
32 changes: 21 additions & 11 deletions Chatbot.py
Original file line number Diff line number Diff line change
@@ -1,29 +1,39 @@
from openai import OpenAI
import os

import httpx
import streamlit as st
from openai import OpenAI

SSL_VERIFY = os.getenv("SSL_VERIFY", "true")

API_KEY = os.getenv("API_KEY")
BASE_URL = os.getenv("API_BASE_URL", "https://api.openai.com/v1")
LLM_MODEL_NAME = os.getenv("LLM_MODEL_NAME", "gpt-3.5-turbo")
with st.sidebar:
openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
"[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
"[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
"[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
"[View the source code](https://github.com/truefoundry/rag-streamlit/blob/main/Chatbot.py)"

st.title("💬 Chatbot")
st.caption("🚀 A Streamlit chatbot powered by OpenAI")
st.caption("🚀 A Streamlit chatbot powered by LLM Gateway")
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]

for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])

if prompt := st.chat_input():
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()

client = OpenAI(api_key=openai_api_key)
client = OpenAI(
api_key=API_KEY,
base_url=BASE_URL,
http_client=httpx.Client(verify=SSL_VERIFY == "true"),
)
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
response = client.chat.completions.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
response = client.chat.completions.create(
model=LLM_MODEL_NAME,
messages=st.session_state.messages,
extra_headers={"X-TFY-METADATA": '{"tfy_log_request":"true"}'},
)
msg = response.choices[0].message.content
st.session_state.messages.append({"role": "assistant", "content": msg})
st.chat_message("assistant").write(msg)
47 changes: 28 additions & 19 deletions pages/1_File_Q&A.py
Original file line number Diff line number Diff line change
@@ -1,33 +1,42 @@
import os

import httpx
import streamlit as st
import anthropic
from openai import OpenAI

API_KEY = os.getenv("API_KEY")
BASE_URL = os.getenv("API_BASE_URL", "https://api.openai.com/v1")
LLM_MODEL_NAME = os.getenv("LLM_MODEL_NAME", "gpt-3.5-turbo")
SSL_VERIFY = os.getenv("SSL_VERIFY", "true")

with st.sidebar:
anthropic_api_key = st.text_input("Anthropic API Key", key="file_qa_api_key", type="password")
"[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/1_File_Q%26A.py)"
"[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
"[View the source code](https://github.com/truefoundry/rag-streamlit/blob/main/pages/1_File_Q%26A.py)"

st.title("📝 File Q&A with Anthropic")
st.title("📝 File Q&A with LLM Gateway")
uploaded_file = st.file_uploader("Upload an article", type=("txt", "md"))
question = st.text_input(
"Ask something about the article",
placeholder="Can you give me a short summary?",
disabled=not uploaded_file,
)

if uploaded_file and question and not anthropic_api_key:
st.info("Please add your Anthropic API key to continue.")

if uploaded_file and question and anthropic_api_key:
if uploaded_file and question:
article = uploaded_file.read().decode()
prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n<article>
{article}\n\n</article>\n\n{question}{anthropic.AI_PROMPT}"""
prompt = f"Here's an article:\n\n<article>\n{article}\n\n</article>\n\n{question}"

client = OpenAI(
api_key=API_KEY,
base_url=BASE_URL,
http_client=httpx.Client(verify=SSL_VERIFY == "true"),
)

client = anthropic.Client(api_key=anthropic_api_key)
response = client.completions.create(
prompt=prompt,
stop_sequences=[anthropic.HUMAN_PROMPT],
model="claude-v1", # "claude-2" for Claude 2 model
max_tokens_to_sample=100,
response = client.chat.completions.create(
model=LLM_MODEL_NAME,
messages=[{"role": "user", "content": prompt}],
temperature=0.7,
stream=False, # Make sure streaming is disabled
extra_headers={
"X-TFY-METADATA": '{"tfy_log_request":"true"}',
},
)
st.write("### Answer")
st.write(response.completion)
st.write(response.choices[0].message.content)
48 changes: 0 additions & 48 deletions pages/2_Chat_with_search.py

This file was deleted.

22 changes: 0 additions & 22 deletions pages/3_Langchain_Quickstart.py

This file was deleted.

29 changes: 0 additions & 29 deletions pages/4_Langchain_PromptTemplate.py

This file was deleted.

65 changes: 0 additions & 65 deletions pages/5_Chat_with_user_feedback.py

This file was deleted.

20 changes: 20 additions & 0 deletions test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
"""Smoke test: send a single prompt through the LLM gateway and print the reply.

Reads connection settings from the environment (same variables the Streamlit
apps use): API_KEY, API_BASE_URL, LLM_MODEL_NAME.
"""

import os

from openai import OpenAI

API_KEY = os.getenv("API_KEY")
BASE_URL = os.getenv("API_BASE_URL", "https://api.openai.com/v1")
LLM_MODEL_NAME = os.getenv("LLM_MODEL_NAME", "gpt-3.5-turbo")


def main() -> None:
    """Issue one non-streaming chat completion and print the model's answer."""
    # Plain string: the original used an f-string with no placeholders.
    prompt = "Here's an article: what is this"

    client = OpenAI(api_key=API_KEY, base_url=BASE_URL)
    chat = client.chat.completions.create(
        model=LLM_MODEL_NAME,
        messages=[{"role": "user", "content": prompt}],
        temperature=0.7,
        stream=False,  # single complete response object, not an event stream
    )
    # Original had a dead statement here (bare `chat.choices[0].message.content`
    # whose value was discarded) followed by the same expression printed — keep
    # only the print.
    print(chat.choices[0].message.content)


if __name__ == "__main__":
    main()