-
Notifications
You must be signed in to change notification settings - Fork 1
/
main.py
73 lines (57 loc) · 2.63 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
import streamlit as st
from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
from dotenv import load_dotenv
import PyPDF2
import os
import io
# Streamlit app: upload a PDF, index its text in a Chroma vector store, and
# answer user questions about it with Gemini via a LangChain "stuff" QA chain.
st.title("Chat Your PDFs")  # Updated title

# Load environment variables from .env. The Google API key must come from the
# environment (GOOGLE_API_KEY) — it was previously hard-coded in source, which
# leaks the secret to anyone with repo access.
load_dotenv()
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")

# File Upload with user-defined name
uploaded_file = st.file_uploader("Upload a PDF file", type=["pdf"])

if uploaded_file is not None:
    st.text("PDF File Uploaded Successfully!")

    # PDF Processing (using PyPDF2 directly). extract_text() may return None
    # for image-only pages, so substitute an empty string to keep join() safe.
    pdf_data = uploaded_file.read()
    pdf_reader = PyPDF2.PdfReader(io.BytesIO(pdf_data))
    context = "\n\n".join((page.extract_text() or "") for page in pdf_reader.pages)

    # Split the full text into overlapping chunks for embedding; the overlap
    # preserves context that straddles a chunk boundary.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=200)
    texts = text_splitter.split_text(context)

    # Embed the chunks and expose the Chroma store as a retriever.
    embeddings = GoogleGenerativeAIEmbeddings(
        model="models/embedding-001", google_api_key=GOOGLE_API_KEY
    )
    vector_index = Chroma.from_texts(texts, embeddings).as_retriever()

    # Get User Question
    user_question = st.text_input("Ask a Question:")

    if st.button("Get Answer"):
        if user_question:
            # Retrieve the chunks most relevant to the question.
            docs = vector_index.get_relevant_documents(user_question)

            # Prompt instructs the model to answer only from the retrieved
            # context and to refuse rather than hallucinate.
            prompt_template = """
            Answer the question as detailed as possible from the provided context,
            make sure to provide all the details, if the answer is not in
            provided context just say, "answer is not available in the context",
            don't provide the wrong answer\n\n
            Context:\n {context}?\n
            Question: \n{question}\n
            Answer:
            """
            prompt = PromptTemplate(
                template=prompt_template, input_variables=["context", "question"]
            )

            # Build the Gemini chat model and a "stuff" QA chain (all retrieved
            # docs are stuffed into a single prompt).
            model = ChatGoogleGenerativeAI(
                model="gemini-pro", temperature=0.3, google_api_key=GOOGLE_API_KEY
            )
            chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)

            # Run the chain and display the answer.
            response = chain({"input_documents": docs, "question": user_question})
            st.subheader("Answer:")
            st.write(response["output_text"])
        else:
            st.warning("Please enter a question.")