# Q_A_with_document.py
import os
import openai
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
openai.api_key = os.environ["OPENAI_API_KEY"]
# account for deprecation of LLM model
import datetime
# Get the current date
current_date = datetime.datetime.now().date()
# Define the date after which the model should be set to "gpt-3.5-turbo"
target_date = datetime.date(2024, 6, 12)
# Set the model variable based on the current date
if current_date > target_date:
llm_model = "gpt-3.5-turbo"
else:
llm_model = "gpt-3.5-turbo-0301"
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import CSVLoader, Docx2txtLoader
from langchain.vectorstores import DocArrayInMemorySearch
from IPython.display import display, Markdown
loader = Docx2txtLoader("jerryGYM.docx")
# file = "outdoorclothing_catalog.csv"
# loader = CSVLoader(file_path=file)
from langchain.indexes import VectorstoreIndexCreator
# Build the vector-store index over the loaded document (index.query below depends on it).
index = VectorstoreIndexCreator(vectorstore_cls=DocArrayInMemorySearch).from_loaders(
    [loader]
)
# index = VectorstoreIndexCreator().from_loaders([loader])
query = "What is the membership model of your gym?"
response = index.query(query)
print(response)
# In a notebook, render the answer as Markdown instead:
# display(Markdown(response))
# The code above is the high-level ("abstraction") implementation: the index handles
# loading, embedding, and retrieval, so you can ask any question about your document.
# From here on is the step-by-step implementation of the same pipeline.
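# A minimal sketch of that step-by-step pipeline, reusing the loader, query, and
# llm_model defined above. The chain settings (chain_type="stuff", temperature=0.0)
# are assumptions for illustration, not taken from the abstracted version above.
docs = loader.load()
print(docs[0])

from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()

# Embed the loaded documents into an in-memory vector store.
db = DocArrayInMemorySearch.from_documents(docs, embeddings)

# Expose the vector store as a retriever for the QA chain.
retriever = db.as_retriever()

llm = ChatOpenAI(temperature=0.0, model=llm_model)

# "stuff" places all retrieved chunks into a single prompt for the LLM.
qa_stuff = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=retriever,
    verbose=True,
)

response = qa_stuff.run(query)
print(response)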