-
Notifications
You must be signed in to change notification settings - Fork 0
/
app.py
57 lines (33 loc) · 1.5 KB
/
app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
# import streamlit for the app
import streamlit as st
# Import langchain stuff
from langchain import PromptTemplate, LLMChain
from langchain.llms import GPT4All
from langchain.chains import SimpleSequentialChain
from langchain.memory import ConversationBufferMemory
# Paths to the local GGUF model files for each selectable backend.
PATH = 'GPT4ALL/mistral-7b-instruct-v0.1.Q4_0.gguf'
PATH2 = 'LLM-Models/codellama-7b-instruct.Q3_K_S.gguf'

# Map the UI label shown in the selectbox to its model file, so the two
# response branches collapse into one and only the chosen model is loaded.
MODEL_PATHS = {
    'Mistral Instruct': PATH,
    'Code llama2': PATH2,
}

# --- Streamlit UI ---
st.title('🐍 Python Co-Pilot 🤖')
prompt = st.text_input('what code do you need')
col1, col2 = st.columns([5, 5])
with col1:
    # NOTE: this selectbox must sit inside the `with col1:` context; the
    # original paste had lost the indentation.
    model_name = st.selectbox('Model to be used',
                              ('Mistral Instruct', 'Code llama2'), index=0)

# Prompt template: wraps the user's request in a code-generation instruction.
code_template = PromptTemplate(input_variables=['question'],
                               template='write a python program to {question}')
# One shared conversation memory, keyed on the question, surfaced in the
# "Message History" expander below.
memory = ConversationBufferMemory(input_key='question', memory_key='chat_history')


@st.cache_resource
def _load_llm(model_path):
    """Load a GPT4All model once and cache it across Streamlit reruns.

    Fix: the original instantiated BOTH 7B models eagerly on every rerun,
    doubling startup latency and memory use; now only the selected model
    is loaded, and it is reused between reruns.
    """
    return GPT4All(model=model_path, verbose=True)


if prompt:
    llm = _load_llm(MODEL_PATHS[model_name])
    code_chain = LLMChain(prompt=code_template, llm=llm, verbose=True, memory=memory)
    response = code_chain.run(question=prompt)
    st.write(response)
    with st.expander('Message History'):
        st.info(memory.buffer)