Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Support using local (open-source) LLMs instead of the OpenAI API as backend #45

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .idea/.gitignore

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

12 changes: 12 additions & 0 deletions .idea/DemoGPT.iml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

21 changes: 21 additions & 0 deletions .idea/inspectionProfiles/Project_Default.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 6 additions & 0 deletions .idea/inspectionProfiles/profiles_settings.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 4 additions & 0 deletions .idea/misc.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 8 additions & 0 deletions .idea/modules.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 6 additions & 0 deletions .idea/vcs.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

10 changes: 10 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -202,6 +202,16 @@ If you have cloned the repository and wish to run the source code version, you c
streamlit run demogpt/app.py
```

### Use Local LLM

First, modify the configuration in model_config.py to ensure that the desired model is correctly configured for use.
Then start the LLM server by running the following command:

```sh
cd server
python llm_api.py
```

## To-Do 📝
- [x] Implement new DemoGPT pipeline including plan generation, task creation, code snippet generation, and final code assembly.
- [x] Add feature to allow users to select models.
Expand Down
60 changes: 34 additions & 26 deletions demogpt/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@

from model import DemoGPT
from utils import runStreamlit
from model_config import llm_model_dict

# logging.basicConfig(level = logging.DEBUG,format='%(levelname)s-%(message)s')

Expand Down Expand Up @@ -52,32 +53,38 @@ def initCode():

initCode()

# Text input

openai_api_key = st.sidebar.text_input(
"OpenAI API Key",
placeholder="sk-...",
value=os.getenv("OPENAI_API_KEY", ""),
type="password",
)

openai_api_base = st.sidebar.text_input(
"Open AI base URL",
placeholder="https://api.openai.com/v1",
llm = st.sidebar.selectbox(
"LLM models",
llm_model_dict.keys(),
)

models = (
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
)
# Text input

model_name = st.sidebar.selectbox("Model", models)
# openai_api_key = st.sidebar.text_input(
# "OpenAI API Key",
# placeholder="sk-...",
# value=os.getenv("OPENAI_API_KEY", ""),
# type="password",
# )
#
# openai_api_base = st.sidebar.text_input(
# "Open AI base URL",
# placeholder="https://api.openai.com/v1",
# )

# models = (
# "gpt-3.5-turbo-0613",
# "gpt-3.5-turbo-0301",
# "gpt-3.5-turbo",
# "gpt-3.5-turbo-16k",
# "gpt-3.5-turbo-16k-0613",
# "gpt-4",
# "gpt-4-0314",
# "gpt-4-0613",
# )
#
# model_name = st.sidebar.selectbox("Model", models)

empty_idea = st.empty()
demo_idea = empty_idea.text_area(
Expand Down Expand Up @@ -119,13 +126,14 @@ def kill():

if submitted:
st.session_state.messages = []
if not openai_api_key:
st.warning("Please enter your OpenAI API Key!", icon="⚠️")
if not llm:
st.warning("Please choose llm model!", icon="⚠️")
else:
bar = progressBar(0)
st.session_state.container = st.container()
agent = DemoGPT(openai_api_key=openai_api_key, openai_api_base=openai_api_base)
agent.setModel(model_name)
agent = DemoGPT(llm_config=llm_model_dict[llm])
# agent = DemoGPT(openai_api_key=openai_api_key, openai_api_base=openai_api_base)
# agent.setModel(model_name)
kill()
code_empty = st.empty()
st.session_state.container = st.container()
Expand Down
13 changes: 4 additions & 9 deletions demogpt/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,18 +12,13 @@
class DemoGPT:
def __init__(
self,
openai_api_key=os.getenv("OPENAI_API_KEY", ""),
model_name="gpt-3.5-turbo-0613",
llm_config,
max_steps=10,
openai_api_base="",
):
assert len(
openai_api_key.strip()
), "Either give openai_api_key as an argument or put it in the environment variable"
self.model_name = model_name
self.openai_api_key = openai_api_key
self.model_name = llm_config["model_name"]
self.openai_api_base = llm_config["api_base_url"]
self.openai_api_key = llm_config["api_key"]
self.max_steps = max_steps # max iteration for refining the model purpose
self.openai_api_base = openai_api_base
Chains.setLlm(
self.model_name, self.openai_api_key, openai_api_base=self.openai_api_base
)
Expand Down
62 changes: 62 additions & 0 deletions demogpt/model_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
"""Configuration for the LLM backends available to DemoGPT.

Each entry in ``llm_model_dict`` describes one model the UI can select:
  - model_name:       identifier passed to the chat backend
  - local_model_path: filesystem/HF path of the local weights (unused for OpenAI)
  - api_base_url:     OpenAI-compatible endpoint (a local fastchat server, or api.openai.com)
  - api_key:          API key; local fastchat servers accept the placeholder "EMPTY"
"""
import os
import logging

import torch

# Log format shared by the project's loggers.
LOG_FORMAT = "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.basicConfig(format=LOG_FORMAT)

llm_model_dict = {
    "chatglm-6b": {
        "model_name": "chatglm-6b",
        "local_model_path": "/opt/ChatGLM-6B/chatglm-6b/",
        # Set to the "api_base_url" exposed by the fastchat service.
        "api_base_url": "http://localhost:8888/v1",
        "api_key": "EMPTY",
    },

    "chatglm2-6b": {
        "model_name": "chatglm2-6b",
        "local_model_path": "/opt/ChatGLM2-6B/chatglm2-6b/",
        # Set to the "api_base_url" exposed by the fastchat service.
        "api_base_url": "http://localhost:8888/v1",
        "api_key": "EMPTY",
    },

    "vicuna-13b-hf": {
        # "model_name" is required: DemoGPT.__init__ reads llm_config["model_name"].
        "model_name": "vicuna-13b-hf",
        "local_model_path": "vicuna-13b-hf",
        # Set to the "api_base_url" exposed by the fastchat service.
        "api_base_url": "http://localhost:8888/v1",
        "api_key": "EMPTY",
    },

    "gpt-3.5-turbo": {
        # "model_name" is required: DemoGPT.__init__ reads llm_config["model_name"].
        "model_name": "gpt-3.5-turbo",
        "local_model_path": "gpt-3.5-turbo",
        "api_base_url": "https://api.openai.com/v1",
        # Default to "" rather than None so downstream string handling is safe.
        "api_key": os.environ.get("OPENAI_API_KEY", ""),
    },

    "baichuan-7b": {
        "model_name": "baichuan-7b",
        "local_model_path": "/opt/baichuan-7B",
        # Set to the "api_base_url" exposed by the fastchat service.
        "api_base_url": "http://localhost:8888/v1",
        "api_key": "EMPTY",
    },
    "Baichuan-13b-Chat": {
        "model_name": "baichuan-13b",
        "local_model_path": "baichuan-inc/Baichuan-13b-Chat",
        # Set to the "api_base_url" exposed by the fastchat service.
        "api_base_url": "http://localhost:8888/v1",
        "api_key": "EMPTY",
    },

}

# Default LLM name.
LLM_MODEL = "chatglm-6b"

# Device the LLM runs on: prefer CUDA, then Apple MPS, else CPU.
LLM_DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"

# Directory where log files are stored (sibling "logs" dir of the package root).
LOG_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "logs")
# makedirs(..., exist_ok=True) avoids the exists()/mkdir() race and creates parents.
os.makedirs(LOG_PATH, exist_ok=True)
Empty file added demogpt/server/__init__.py
Empty file.
Loading