Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Shorter tracebacks (bad solution maybe?) #295

Closed
wants to merge 5 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions modules/AlignmentNewsletterSearch.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,16 +147,17 @@ def process_message(self, message: ServiceMessage) -> Response:

query = match.group("query")
return Response(
confidence=9, callback=self.process_search_request, args=[query]
confidence=9, callback=self.process_search_request, kwargs={"prompt": query}
)

async def process_search_request(self, query) -> Response:
async def process_search_request(self, prompt: str) -> Response:
"""Search for relevant items for the query.

First we load all items from the Alignment Newsletter database.
Then we sort the items by relevance to the query.
Finally we return the most relevant items, if any.
"""
query = prompt
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'll factor out these redundant lines once I know whether I'm on the right track at all.

self.log.info(self.class_name, newsletter_query=query)

items = self.load_items()
Expand Down
2 changes: 1 addition & 1 deletion modules/Eliza.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,6 @@ def process_message(self, message: ServiceMessage) -> Response:
return Response(
confidence=1,
text=result,
why=f"{message.author.name} said '{text}', and ELIZA responded '{result}'" ,
why=f"It seemed like a good idea at the time" ,
)
return Response()
4 changes: 2 additions & 2 deletions modules/Silly.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@ def process_message(self, message):
atme = self.is_at_me(message)
text = atme or message.clean_content
who = message.author.name
print(atme)
print(text)
#print(atme) # DEBUG
#print(text) # DEBUG

if atme and utils.message_repeated(message, text):
self.log.info(
Expand Down
5 changes: 3 additions & 2 deletions modules/chatgpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ def process_message(self, message: ServiceMessage) -> Response:
return Response()

return Response(
confidence=3, callback=self.chatgpt_chat, args=[message], kwargs={}
confidence=3, callback=self.chatgpt_chat, kwargs={ "prompt": message }
)

def process_message_from_stampy(self, message) -> None:
Expand Down Expand Up @@ -116,8 +116,9 @@ def generate_messages_list(self, channel) -> list[dict[str, str]]:

return messages

async def chatgpt_chat(self, message: ServiceMessage) -> Response:
async def chatgpt_chat(self, prompt: ServiceMessage) -> Response:
"""Ask ChatGPT what Stampy would say next in the chat log"""
message: ServiceMessage = prompt
if self.openai is None:
return Response()

Expand Down
10 changes: 5 additions & 5 deletions modules/duckduckgo.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,21 +23,21 @@ def process_message(self, message: ServiceMessage) -> Response:
return Response(
confidence=10,
callback=self.ask,
args=[text[m.end(0):]],
kwargs={ "prompt": text[m.end(0):] },
why="This is definitely a web search",
)
print(f"Text didn't match: {text}")
if text.endswith("?"):
return Response(
confidence=6,
callback=self.ask,
args=[text],
kwargs={ "prompt": text },
why="It's a question, we might be able to answer it",
)
return Response(
confidence=2,
callback=self.ask,
args=[text],
kwargs={ "prompt": text },
why="It's not a question but we might be able to look it up",
)
return Response()
Expand All @@ -57,12 +57,12 @@ def get_confidence(self, answer: str, max_confidence: float) -> float:
return 1
return max_confidence

def ask(self, question: str) -> Response:
def ask(self, prompt: str) -> Response:
"""Ask DuckDuckGo a question and return a response."""

# strip out question mark and common 'question phrases', e.g. 'who are',
# 'what is', 'tell me about'
q = question.lower().strip().strip("?")
q = prompt.lower().strip().strip("?")
q = re.sub(r"w(hat|ho)('s|'re| is| are| was| were) ?", "", q)
q = re.sub(r"(what do you know|(what )?(can you)? ?tell me) about", "", q)

Expand Down
5 changes: 3 additions & 2 deletions modules/gpt3module.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ def process_message(self, message: ServiceMessage) -> Response:
return Response()

return Response(
confidence=2, callback=self.gpt3_chat, args=[message], kwargs={}
confidence=2, callback=self.gpt3_chat, kwargs={"prompt": message}
)

def process_message_from_stampy(self, message: ServiceMessage) -> None:
Expand Down Expand Up @@ -165,7 +165,8 @@ def get_engine(self, message: ServiceMessage) -> Optional[OpenAIEngines]:
if self.openai and self.openai.is_channel_allowed(message):
return self.openai.get_engine(message)

async def gpt3_chat(self, message: ServiceMessage) -> Response:
async def gpt3_chat(self, prompt: ServiceMessage) -> Response:
message: ServiceMessage = prompt
"""Ask GPT-3 what Stampy would say next in the chat log"""
self.openai = cast(OpenAI, self.openai)

Expand Down
14 changes: 13 additions & 1 deletion modules/module.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ def __repr__(self) -> str:
text = self.text
callback = self.callback.__name__ if self.callback else None
args = self.args
kwargs = self.kwargs
kwargs = trim_kwargs(self.kwargs)
module = str(self.module)
why = self.why
return (
Expand Down Expand Up @@ -333,3 +333,15 @@ class IntegrationTest(TypedDict):
test_wait_time: float
minimum_allowed_similarity: float
result: Literal["PASSED", "FAILED", None]

def trim_kwargs(kwargs: dict) -> dict:
    """Return a copy of *kwargs* with verbose values replaced by short placeholders.

    The prompt should already be known by the user (#294), so repeating it in
    logs/tracebacks only adds noise.  For each unwanted key present, the value
    is replaced with ``"id=<id>"`` when it has an ``id`` attribute (e.g. a
    service message), otherwise with ``"<key>"``.

    Args:
        kwargs: keyword arguments destined for a callback.

    Returns:
        A shallow copy of ``kwargs`` with unwanted values summarized.  The
        input dict is left untouched so the real callback still receives the
        original values.
    """
    # NOTE: must be an iterable *of strings* — frozenset("prompt") would be a
    # set of single characters, silently matching nothing (or the wrong keys).
    unwanted = frozenset({"prompt"})
    trimmed = dict(kwargs)  # don't mutate the caller's dict
    for key in unwanted:
        if key in trimmed:
            value = trimmed[key]
            if hasattr(value, "id"):
                trimmed[key] = "id={}".format(value.id)
            else:
                trimmed[key] = "<{}>".format(key)
    return trimmed
6 changes: 3 additions & 3 deletions modules/semanticanswers.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ def process_message(self, message):
return Response(
confidence=6,
callback=self.ask,
args=[text],
kwargs={"prompt": text},
why="It's a question, there might be a similar question in the database",
)
else:
Expand All @@ -20,8 +20,8 @@ def process_message(self, message):
def __str__(self):
return "Semantic Answers"

def ask(self, question):
q = question.lower().strip()
def ask(self, prompt: str):
q = prompt.lower().strip()
url = (
"https://stampy-nlp-t6p37v2uia-uw.a.run.app/api/search?query=%s"
% urllib.parse.quote_plus(q)
Expand Down
10 changes: 6 additions & 4 deletions modules/why.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,14 +24,14 @@ def process_message(self, message: ServiceMessage) -> Response:
return Response(
confidence=10,
callback=self.specific,
args=[message],
kwargs={"prompt": message},
why="A stamp owner wants to know why I said something.",
)
else:
return Response(
confidence=10,
callback=self.general,
args=[message],
kwargs={"prompt": message},
why="A stamp owner wants to know why I said something.",
)
else:
Expand All @@ -54,7 +54,8 @@ async def _get_message_about(self, message: DiscordMessage) -> str:
return str(m.id)
raise Exception("No message from stampy found")

async def specific(self, message: DiscordMessage) -> Response:
async def specific(self, prompt: DiscordMessage) -> Response:
message: DiscordMessage = prompt
m_id = await self._get_message_about(message)
messages = self._get_known_messages()
if m_id not in messages:
Expand All @@ -68,7 +69,8 @@ async def specific(self, message: DiscordMessage) -> Response:
builder += f"{step}\n"
return Response(confidence=10, text=builder, why="I was asked why I said something.")

async def general(self, message: DiscordMessage) -> Response:
async def general(self, prompt: DiscordMessage) -> Response:
message: DiscordMessage = prompt
m_id = await self._get_message_about(message)
messages = self._get_known_messages()
if m_id not in messages:
Expand Down
7 changes: 4 additions & 3 deletions modules/wolfram.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,14 +35,14 @@ def process_message(self, message):
return Response(
confidence=5,
callback=self.ask,
args=[text],
kwargs={"prompt": text},
why="It's a question, we might be able to answer it",
)
else:
return Response(
confidence=1,
callback=self.ask,
args=[text],
kwargs={"prompt": text},
why="It's not a question but we might be able to look it up",
)

Expand All @@ -59,7 +59,8 @@ def confidence_of_answer(self, answer: str) -> float:
else:
return 8

def ask(self, question):
def ask(self, prompt: str):
question: str = prompt
try:
self.log.info(self.class_name, wolfram_alpha_question=question)
question_escaped = urllib.parse.quote_plus(question.strip())
Expand Down
7 changes: 5 additions & 2 deletions servicemodules/discord.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
youtube_api_key,
bot_private_channel_id,
)
from modules.module import Response
from modules.module import Response, trim_kwargs
from servicemodules import discordConstants
from utilities import (
Utilities,
Expand Down Expand Up @@ -169,6 +169,8 @@ async def on_message(

response.text = limit_text_and_notify(response, why_traceback)

response = trim_kwargs(response.kwargs)

why_traceback.append(
f"I asked the {module} module, and it responded with: {response}"
)
Expand All @@ -183,10 +185,11 @@ async def on_message(
if response.callback:
args_string = ", ".join([a.__repr__() for a in response.args])
if response.kwargs:
cleaned_items = trim_kwargs(response.kwargs)
args_string += ", " + ", ".join(
[
f"{k}={v.__repr__()}"
for k, v in response.kwargs.items()
for k, v in cleaned_items
]
)
log.info(
Expand Down