Skip to content

Commit

Permalink
llm logs --prompts option (#737)
Browse files Browse the repository at this point in the history
Closes #736
  • Loading branch information
simonw authored Feb 2, 2025
1 parent 21df241 commit 41d64a8
Show file tree
Hide file tree
Showing 4 changed files with 90 additions and 3 deletions.
1 change: 1 addition & 0 deletions docs/help.md
Original file line number Diff line number Diff line change
Expand Up @@ -302,6 +302,7 @@ Options:
-t, --truncate Truncate long strings in output
-u, --usage Include token usage
-r, --response Just output the last response
--prompts Output prompts, end-truncated if necessary
-x, --extract Extract first fenced code block
--xl, --extract-last Extract last fenced code block
-c, --current Show logs from the current conversation
Expand Down
16 changes: 16 additions & 0 deletions docs/logging.md
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,22 @@ You can truncate the display of the prompts and responses using the `-t/--trunca
```bash
llm logs -n 5 -t --json
```
Or use `--prompts` to see just the truncated prompts (and system prompts, where set):
```bash
llm logs -n 2 --prompts
```
Example output:
```
- model: deepseek-reasoner
datetime: 2025-02-02T06:39:53
conversation: 01jk2pk05xq3d0vgk0202zrsg1
prompt: H01 There are five huts. H02 The Scotsman lives in the purple hut. H03 The Welshman owns the parrot. H04 Kombucha is...
- model: o3-mini
datetime: 2025-02-02T19:03:05
conversation: 01jk40qkxetedzpf1zd8k9bgww
system: Formatting re-enabled. Write a detailed README with extensive usage examples.
prompt: <documents> <document index="1"> <source>./Cargo.toml</source> <document_content> [package] name = "py-limbo" version...
```

(logs-conversation)=
### Logs for a conversation
Expand Down
51 changes: 48 additions & 3 deletions llm/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from dataclasses import asdict
import io
import json
import re
from llm import (
Attachment,
AsyncResponse,
Expand Down Expand Up @@ -874,6 +875,9 @@ def logs_turn_off():
@click.option("-t", "--truncate", is_flag=True, help="Truncate long strings in output")
@click.option("-u", "--usage", is_flag=True, help="Include token usage")
@click.option("-r", "--response", is_flag=True, help="Just output the last response")
@click.option(
"--prompts", is_flag=True, help="Output prompts, end-truncated if necessary"
)
@click.option("-x", "--extract", is_flag=True, help="Extract first fenced code block")
@click.option(
"extract_last",
Expand Down Expand Up @@ -910,6 +914,7 @@ def logs_list(
truncate,
usage,
response,
prompts,
extract,
extract_last,
current_conversation,
Expand All @@ -923,6 +928,18 @@ def logs_list(
db = sqlite_utils.Database(path)
migrate(db)

if prompts and (json_output or response):
invalid = " or ".join(
[
flag[0]
for flag in (("--json", json_output), ("--response", response))
if flag[1]
]
)
raise click.ClickException(
"Cannot use --prompts and {} together".format(invalid)
)

if response and not current_conversation and not conversation_id:
current_conversation = True

Expand Down Expand Up @@ -1035,6 +1052,27 @@ def logs_list(
current_system = None
should_show_conversation = True
for row in rows:
if prompts:
system = _truncate_string(row["system"], 120, end=True)
prompt = _truncate_string(row["prompt"], 120, end=True)
cid = row["conversation_id"]
attachments = attachments_by_id.get(row["id"])
lines = [
"- model: {}".format(row["model"]),
" datetime: {}".format(row["datetime_utc"]).split(".")[0],
" conversation: {}".format(cid),
]
if system:
lines.append(" system: {}".format(system))
if prompt:
lines.append(" prompt: {}".format(prompt))
if attachments:
lines.append(" attachments:")
for attachment in attachments:
path = attachment["path"] or attachment["url"]
lines.append(" - {}: {}".format(attachment["type"], path))
click.echo("\n".join(lines))
continue
click.echo(
"# {}{}\n{}".format(
row["datetime_utc"].split(".")[0],
Expand Down Expand Up @@ -1897,10 +1935,17 @@ def template_dir():
return path


def _truncate_string(s, max_length=100):
if len(s) > max_length:
def _truncate_string(s, max_length=100, end=False):
if not s:
return s
if end:
s = re.sub(r"\s+", " ", s)
if len(s) <= max_length:
return s
return s[: max_length - 3] + "..."
return s
if len(s) <= max_length:
return s
return s[: max_length - 3] + "..."


def logs_db_path():
Expand Down
25 changes: 25 additions & 0 deletions tests/test_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -164,6 +164,31 @@ def test_logs_extract_last_code(args, log_path):
assert result.output == 'print("hello word")\n\n'


def test_logs_prompts(log_path):
    """``llm logs --prompts`` renders one compact summary per logged response."""
    runner = CliRunner()
    result = runner.invoke(cli, ["logs", "--prompts", "-p", str(log_path)])
    assert result.exit_code == 0
    # Normalize real timestamps to a fixed placeholder before comparing.
    output = datetime_re.sub("YYYY-MM-DDTHH:MM:SS", result.output)
    # The log fixture contains three identical entries, so the expected
    # output is one summary block repeated three times.
    entry = (
        "- model: davinci\n"
        " datetime: YYYY-MM-DDTHH:MM:SS\n"
        " conversation: abc123\n"
        " system: system\n"
        " prompt: prompt\n"
    )
    assert output == entry * 3


@pytest.mark.xfail(sys.platform == "win32", reason="Expected to fail on Windows")
@pytest.mark.parametrize("env", ({}, {"LLM_USER_PATH": "/tmp/llm-user-path"}))
def test_logs_path(monkeypatch, env, user_path):
Expand Down

0 comments on commit 41d64a8

Please sign in to comment.