# memory_for_chatbot.py
import os
import openai
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
openai.api_key = os.environ['OPENAI_API_KEY']
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory, ConversationBufferWindowMemory
llm = ChatOpenAI(temperature=0.0)
memory = ConversationBufferMemory()
conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True,  # verbose=True prints the full prompt the chain sends to the LLM; set verbose=False to hide it
)
# print(conversation.predict(input="Hi, my name is Andrew"))
# print(memory.buffer)  # shows how the conversation so far is stored in the memory buffer
# print(memory.load_memory_variables({}))  # returns the conversation history as a dictionary
# ConversationBufferMemory is costly for long chats: it stores the whole conversation
# from the start and sends all of it to the LLM on every call, so the token count
# (and therefore the cost) keeps growing. There are cheaper alternatives.
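# A minimal sketch of that growth (buffer_demo is a hypothetical name, not from the
# original): every exchange saved with save_context stays in the history forever.
buffer_demo = ConversationBufferMemory()
buffer_demo.save_context({"input": "Hi"}, {"output": "Hello!"})
buffer_demo.save_context({"input": "How are you?"}, {"output": "Great, thanks."})
# Both exchanges are still in the history that would be sent to the LLM.
print(buffer_demo.load_memory_variables({}))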
# ConversationBufferWindowMemory keeps only the last k exchanges, where k is
# passed as a parameter (k=1, 3, 5, ...).
llm = ChatOpenAI(temperature=0.0)
memory = ConversationBufferWindowMemory(k=3)
conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True,
)
# print(conversation.predict(input="Hi, my name is Andrew"))
# print(conversation.predict(input="I want you to help me in building my study timetable"))
# print(conversation.predict(input="Yeah, I have subjects = ['Maths', 'English', 'SScience']"))
# print(conversation.predict(input="I want to study Maths for about 3 hours, English for about 1 hour, and SScience for about 1 hour"))
# print(conversation.predict(input="I want to study about 5 days in a week"))
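# A minimal sketch of how the window truncates (window_demo is a hypothetical name,
# not from the original): with k=1 only the most recent exchange survives.
window_demo = ConversationBufferWindowMemory(k=1)
window_demo.save_context({"input": "Hi"}, {"output": "What's up"})
window_demo.save_context({"input": "Not much, just hanging"}, {"output": "Cool"})
# Only the second exchange remains; the first has been dropped, so the history
# should be roughly {'history': 'Human: Not much, just hanging\nAI: Cool'}.
print(window_demo.load_memory_variables({}))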
# ConversationTokenBufferMemory is more cost-effective still: it caps the stored
# history by token count rather than by number of exchanges, limiting the tokens
# sent to the LLM directly.
from langchain.memory import ConversationTokenBufferMemory
llm = ChatOpenAI(temperature=0.0)
memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=30)
memory.save_context({"input": "AI is what?!"}, {"output": "Amazing!"})
memory.save_context({"input": "Backpropagation is what?"}, {"output": "Beautiful!"})
memory.save_context({"input": "Chatbots are what?"}, {"output": "Charming!"})
# With max_token_limit=30 only the most recent exchanges fit in the buffer.
print(memory.load_memory_variables({}))
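# Why does ConversationTokenBufferMemory take an llm argument? It needs the model's
# tokenizer to count tokens against the limit. A quick check (a sketch, assuming
# LangChain's get_num_tokens helper on the model):
print(llm.get_num_tokens("Backpropagation is what?"))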
# ConversationSummaryBufferMemory keeps recent exchanges verbatim and, once the
# token limit is exceeded, uses the LLM itself to summarise the older ones.
from langchain.memory import ConversationSummaryBufferMemory
# Create a long string to push the memory past its token limit.
schedule = "There is a meeting at 8am with your product team. \
You will need your PowerPoint presentation prepared. \
9am-12pm you have time to work on your LangChain \
project, which will go quickly because LangChain is such a powerful tool. \
At noon, lunch at the Italian restaurant with a customer who is driving \
from over an hour away to meet you to understand the latest in AI. \
Be sure to bring your laptop to show the latest LLM demo."
memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=100)
memory.save_context({"input": "Hello"}, {"output": "What's up"})
memory.save_context({"input": "Not much, just hanging"}, {"output": "Cool"})
memory.save_context({"input": "What is on the schedule today?"}, {"output": schedule})
# The history now holds an LLM-generated summary instead of the full transcript.
print(memory.load_memory_variables({}))
conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True,
)
print(conversation.predict(input="What would be a good demo to show?"))
# The new exchange is folded into the summary as well.
print(memory.load_memory_variables({}))
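# To inspect just the running summary, ConversationSummaryBufferMemory exposes a
# moving_summary_buffer attribute (a sketch, assuming the classic langchain API):
print(memory.moving_summary_buffer)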