feat: honcho fact memory example #21

Merged
merged 1 commit on Feb 15, 2024
2 changes: 2 additions & 0 deletions example/discord/honcho-fact-memory/.env.template
@@ -0,0 +1,2 @@
BOT_TOKEN=
OPENAI_API_KEY=
5 changes: 5 additions & 0 deletions example/discord/honcho-fact-memory/.gitignore
@@ -0,0 +1,5 @@
.env

.venv

.DS_Store
85 changes: 85 additions & 0 deletions example/discord/honcho-fact-memory/bot.py
@@ -0,0 +1,85 @@
import os
import discord
from honcho import Client as HonchoClient
from chain import langchain_message_converter, LMChain


intents = discord.Intents.default()
intents.messages = True
intents.message_content = True
intents.members = True

app_id = "demo-honcho-fact-memory"

# honcho = HonchoClient(app_id=app_id, base_url="http://localhost:8000")  # uncomment to use a local server
honcho = HonchoClient(app_id=app_id)  # uses the demo server at https://demo.honcho.dev

bot = discord.Bot(intents=intents)


@bot.event
async def on_ready():
    print(f'We have logged in as {bot.user}')

@bot.event
async def on_member_join(member):
    await member.send(
        f"*Hello {member.name}, welcome to the server! This is a demo bot built with Honcho,* "
        "*implementing a naive version of the memory feature similar to what ChatGPT recently released.* "
        "*To get started, just type a message in this channel and the bot will respond.* "
        "*Over time, it will remember facts about you and use them to make the conversation more personal.* "
        "*You can use the /restart command to restart the conversation at any time.* "
        "*If you have any questions or feedback, feel free to ask in the #honcho channel.* "
        "*Enjoy!*"
    )


@bot.event
async def on_message(message):
    # ignore the bot's own messages and anything sent outside of a DM
    if message.author == bot.user or message.guild is not None:
        return

    user_id = f"discord_{str(message.author.id)}"
    location_id = str(message.channel.id)

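    # look up (or create) this user's session and long-term fact collection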
    sessions = list(honcho.get_sessions_generator(user_id, location_id))
    try:
        collection = honcho.get_collection(user_id=user_id, name="discord")
    except Exception:
        collection = honcho.create_collection(user_id=user_id, name="discord")

    if len(sessions) > 0:
        session = sessions[0]
    else:
        session = honcho.create_session(user_id, location_id)

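    # rebuild LangChain-format chat history from the messages stored in Honcho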
    history = list(session.get_messages_generator())
    chat_history = langchain_message_converter(history)

    inp = message.content
    user_message = session.create_message(is_user=True, content=inp)

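    # run the memory pipeline (derive facts -> dedupe -> introspect -> respond)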
    async with message.channel.typing():
        response = await LMChain.chat(
            chat_history=chat_history,
            user_message=user_message,
            session=session,
            collection=collection,
            input=inp
        )
        await message.channel.send(response)

    session.create_message(is_user=False, content=response)

@bot.slash_command(name="restart", description="Restart the conversation")
async def restart(ctx):
    user_id = f"discord_{str(ctx.author.id)}"
    location_id = str(ctx.channel_id)
    sessions = list(honcho.get_sessions_generator(user_id, location_id))
    # close the active session, if any, so the next message starts a fresh one
    if len(sessions) > 0:
        sessions[0].close()

    msg = "Great! The conversation has been restarted. What would you like to talk about?"
    await ctx.respond(msg)

bot.run(os.environ["BOT_TOKEN"])
175 changes: 175 additions & 0 deletions example/discord/honcho-fact-memory/chain.py
@@ -0,0 +1,175 @@
import os
from typing import List
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, load_prompt
from langchain_core.output_parsers import NumberedListOutputParser
from langchain_core.messages import AIMessage, HumanMessage

from honcho import Collection, Session, Message

load_dotenv()

SYSTEM_DERIVE_FACTS = load_prompt(os.path.join(os.path.dirname(__file__), 'prompts/core/derive_facts.yaml'))
SYSTEM_INTROSPECTION = load_prompt(os.path.join(os.path.dirname(__file__), 'prompts/core/introspection.yaml'))
SYSTEM_RESPONSE = load_prompt(os.path.join(os.path.dirname(__file__), 'prompts/core/response.yaml'))
SYSTEM_CHECK_DUPS = load_prompt(os.path.join(os.path.dirname(__file__), 'prompts/utils/check_dup_facts.yaml'))


def langchain_message_converter(messages: List):
    """Convert stored Honcho messages into LangChain HumanMessage/AIMessage objects"""
    new_messages = []
    for message in messages:
        if message.is_user:
            new_messages.append(HumanMessage(content=message.content))
        else:
            new_messages.append(AIMessage(content=message.content))
    return new_messages


class LMChain:
    """Wrapper class encapsulating the different chains used"""

    output_parser = NumberedListOutputParser()
    llm: ChatOpenAI = ChatOpenAI(model_name="gpt-3.5-turbo")
    system_derive_facts: SystemMessagePromptTemplate = SystemMessagePromptTemplate(prompt=SYSTEM_DERIVE_FACTS)
    system_introspection: SystemMessagePromptTemplate = SystemMessagePromptTemplate(prompt=SYSTEM_INTROSPECTION)
    system_response: SystemMessagePromptTemplate = SystemMessagePromptTemplate(prompt=SYSTEM_RESPONSE)
    system_check_dups: SystemMessagePromptTemplate = SystemMessagePromptTemplate(prompt=SYSTEM_CHECK_DUPS)

    def __init__(self) -> None:
        pass

    @classmethod
    async def derive_facts(cls, chat_history: List, input: str):
        """Derive facts from the user input"""

        # format prompt
        fact_derivation = ChatPromptTemplate.from_messages([
            cls.system_derive_facts
        ])

        # LCEL
        chain = fact_derivation | cls.llm

        # inference
        response = await chain.ainvoke({
            "chat_history": [
                ("user: " + message.content if isinstance(message, HumanMessage) else "ai: " + message.content)
                for message in chat_history
            ],
            "user_input": input
        })

        # parse output
        facts = cls.output_parser.parse(response.content)

        print(f"DERIVED FACTS: {facts}")

        return facts

    @classmethod
    async def check_dups(cls, user_message: Message, session: Session, collection: Collection, facts: List):
        """Check that we're not storing duplicate facts"""

        # format prompt
        check_duplication = ChatPromptTemplate.from_messages([
            cls.system_check_dups
        ])

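        # join the candidate facts into a single query and retrieve the closest stored facts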
        query = " ".join(facts)
        result = collection.query(query=query, top_k=10)
        existing_facts = [document.content for document in result]

        # LCEL
        chain = check_duplication | cls.llm

        # inference
        response = await chain.ainvoke({
            "existing_facts": existing_facts,
            "facts": facts
        })

        # parse output
        new_facts = cls.output_parser.parse(response.content)

        print(f"FILTERED FACTS: {new_facts}")

        # write the novel facts to the vector store
        for fact in new_facts:
            collection.create_document(content=fact)

        # add facts as metamessages
        for fact in new_facts:
            session.create_metamessage(message=user_message, metamessage_type="fact", content=fact)

        return


    @classmethod
    async def introspect(cls, user_message: Message, session: Session, chat_history: List, input: str):
        """Generate questions about the user to use for retrieval over the fact store"""

        # format prompt
        introspection_prompt = ChatPromptTemplate.from_messages([
            cls.system_introspection
        ])

        # LCEL
        chain = introspection_prompt | cls.llm

        # inference
        response = await chain.ainvoke({
            "chat_history": chat_history,
            "user_input": input
        })

        # parse output
        questions = cls.output_parser.parse(response.content)

        print(f"INTROSPECTED QUESTIONS: {questions}")

        # write questions as metamessages
        for question in questions:
            session.create_metamessage(message=user_message, metamessage_type="introspect", content=question)

        return questions


    @classmethod
    async def respond(cls, collection: Collection, chat_history: List, questions: List, input: str):
        """Take the facts and chat history and generate a personalized response"""

        # format prompt
        response_prompt = ChatPromptTemplate.from_messages([
            cls.system_response,
            *chat_history,
            HumanMessage(content=input)
        ])

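        # retrieve stored facts relevant to the introspected questions,
        # joined into a single query string as in check_dups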
        retrieved_facts = collection.query(query=" ".join(questions), top_k=10)
        retrieved_facts_content = [document.content for document in retrieved_facts]

        # LCEL
        chain = response_prompt | cls.llm

        # inference
        response = await chain.ainvoke({
            "facts": retrieved_facts_content,
        })

        return response.content

    @classmethod
    async def chat(cls, chat_history: List, user_message: Message, session: Session, collection: Collection, input: str):
        """Full pipeline: derive facts, store the novel ones, introspect, and respond"""

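        # derive candidate facts from the latest input; only novel ones get stored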
        facts = await cls.derive_facts(chat_history, input)
        if facts:
            await cls.check_dups(user_message, session, collection, facts)

        # introspect
        questions = await cls.introspect(user_message, session, chat_history, input)

        # respond
        response = await cls.respond(collection, chat_history, questions, input)

        return response
