fix(deriver) remove markup parsing from rich implementation (#76)
VVoruganti authored Nov 5, 2024
1 parent 363f90d commit 05a57cf
Showing 1 changed file with 14 additions and 10 deletions.
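
Why the markup parsing was removed (a minimal sketch, not part of the commit; it assumes only that the rich package is installed): rich's print parses square-bracket markup such as [green]...[/green], so bracketed substrings inside user- or model-generated content can be interpreted as style tags and either vanish from the output or raise rich.errors.MarkupError. Creating a Console with markup=False and passing the colour through the style argument prints the content verbatim, which is the pattern the diff below applies at every call site.

    # Sketch only: illustrates the before/after pattern, not code taken from the repo.
    from rich import print as rprint
    from rich.console import Console
    from rich.errors import MarkupError

    content = "the user literally typed [/end] in their message"

    # Old pattern: rich.print parses square-bracket markup, so the stray closing
    # tag inside the content raises MarkupError instead of being printed.
    try:
        rprint(f"[green]Processing AI message: {content}[/green]")
    except MarkupError as exc:
        print(f"markup parsing failed: {exc}")

    # New pattern (what the diff applies): markup is disabled on the Console and
    # the colour is passed via style=, so the content is printed exactly as given.
    console = Console(markup=False)
    console.print(f"Processing AI message: {content}", style="green")

Disabling markup at the Console level also means individual call sites no longer need to escape their content with rich.markup.escape.
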
src/deriver/consumer.py (24 changes: 14 additions, 10 deletions)
@@ -1,7 +1,7 @@
 import logging
 import re
 
-from rich import print as rprint
+from rich.console import Console
 from sqlalchemy import select
 from sqlalchemy.ext.asyncio import AsyncSession
 
@@ -11,6 +11,8 @@
 # Turn off SQLAlchemy Echo logging
 logging.getLogger("sqlalchemy.engine.Engine").disabled = True
 
+console = Console(markup=False)
+
 
 # FIXME see if this is SAFE
 async def add_metamessage(db, message_id, metamessage_type, content):
@@ -56,7 +58,7 @@ async def process_ai_message(
     """
     Process an AI message. Make a prediction about what the user is going to say to it.
     """
-    rprint(f"[green]Processing AI message: {content}[/green]")
+    console.print(f"Processing AI message: {content}", style="green")
 
     subquery = (
         select(models.Message.id)
@@ -95,9 +97,9 @@ async def process_ai_message(
 
     await db.commit()
 
-    rprint("[blue]Tom Inference:")
+    console.print("Tom Inference:", style="blue")
     content_lines = str(prediction)
-    rprint(f"[blue]{content_lines}")
+    console.print(content_lines, style="blue")
 
 
 async def process_user_message(
@@ -111,7 +113,7 @@ async def process_user_message(
     """
     Process a user message. If there are revised user predictions to run VoE against, run it. Otherwise pass.
    """
-    rprint(f"[orange1]Processing User Message: {content}")
+    console.print(f"Processing User Message: {content}", style="orange1")
     subquery = (
         select(models.Message.id)
         .where(models.Message.public_id == message_id)
@@ -131,7 +133,7 @@ async def process_user_message(
     ai_message = response.scalar_one_or_none()
 
     if ai_message and ai_message.content:
-        rprint(f"[orange1]AI Message: {ai_message.content}")
+        console.print(f"AI Message: {ai_message.content}", style="orange1")
 
         # Fetch the tom_inference metamessage
         tom_inference_stmt = (
@@ -145,7 +147,9 @@ async def process_user_message(
         tom_inference_metamessage = response.scalar_one_or_none()
 
         if tom_inference_metamessage and tom_inference_metamessage.content:
-            rprint(f"[orange1]Tom Inference: {tom_inference_metamessage.content}")
+            console.print(
+                f"Tom Inference: {tom_inference_metamessage.content}", style="orange1"
+            )
 
             # Fetch the existing user representation
             user_representation_stmt = (
@@ -183,11 +187,11 @@ async def process_user_message(
                 user_representation_response, "representation"
             )
 
-            rprint("[bright_magenta]User Representation:")
-            rprint(f"[bright_magenta]{user_representation_response}")
+            console.print("User Representation:", style="bright_magenta")
+            console.print(user_representation_response, style="bright_magenta")
 
         else:
             raise Exception("\033[91mTom Inference NOT READY YET")
     else:
-        rprint("[red]No AI message before this user message[/red]")
+        console.print("No AI message before this user message", style="red")
     return
