diff --git a/00_core.ipynb b/00_core.ipynb
index fb39d67..7502da6 100644
--- a/00_core.ipynb
+++ b/00_core.ipynb
@@ -81,7 +81,7 @@
},
{
"cell_type": "markdown",
- "id": "37e1b56d",
+ "id": "8f326dfa",
"metadata": {},
"source": [
":::{.callout-tip}\n",
@@ -106,7 +106,7 @@
},
{
"cell_type": "markdown",
- "id": "7aad63b7",
+ "id": "c7d860dd",
"metadata": {},
"source": [
"These are the current versions of Anthropic's model at the time of writing."
@@ -169,7 +169,7 @@
},
{
"cell_type": "markdown",
- "id": "877c3577",
+ "id": "80b96961",
"metadata": {},
"source": [
":::{.callout-tip}\n",
@@ -188,7 +188,7 @@
{
"data": {
"text/plain": [
- "Message(id='msg_014HjqFL5CM3dZ1z98nnh2f6', content=[TextBlock(text=\"It's nice to meet you Jeremy! I'm Claude, an AI assistant created by Anthropic. I'm here to help out however I can. Please let me know if you have any questions or if there is anything I can assist you with.\", type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=Usage(input_tokens=10, output_tokens=54))"
+ "Message(id='msg_01GhYT4edo9Zz8qH46xxHK7m', content=[TextBlock(text=\"It's nice to meet you, Jeremy! I'm Claude, an AI assistant. How can I help you today?\", type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=Usage(input_tokens=10, output_tokens=27))"
]
},
"execution_count": null,
@@ -202,17 +202,26 @@
"r"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "494287f0",
+ "metadata": {},
+ "source": [
+ "### Formatting output"
+ ]
+ },
{
"cell_type": "markdown",
"id": "865c40a9",
"metadata": {},
"source": [
- "Now we're going to write our first function which will be included to the `claudia/core.py` module."
+ "That output is pretty long and hard to read, so let's clean it up. We'll start by pulling out the `Content` part of the message.\n",
+ "To do that, we're going to write our first function which will be included in the `claudio/core.py` module."
]
},
{
"cell_type": "markdown",
- "id": "c9645e9f",
+ "id": "fa71b71c",
"metadata": {},
"source": [
":::{.callout-tip}\n",
@@ -231,38 +240,34 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "4f3e4de9",
+ "id": "2f23a5d8",
"metadata": {},
"outputs": [],
"source": [
"#| exports\n",
- "def mk_msg(content, role='user', **kw):\n",
- " \"Helper to create a `dict` appropriate for a Claude message.\"\n",
- " if hasattr(content, 'content'): content,role = content.content,content.role\n",
- " if isinstance(content, abc.Mapping): content=content['content']\n",
- " return dict(role=role, content=content, **kw)"
+ "def find_block(r, blk_type=TextBlock):\n",
+ " \"Find the first block of type `blk_type` in `r.content`.\"\n",
+ " return first(o for o in r.content if isinstance(o,blk_type))"
]
},
{
"cell_type": "markdown",
- "id": "1a76545a",
+ "id": "f7a90432",
"metadata": {},
"source": [
- "We make things a bit more convenient by writing a function to create these messages for us.\n",
- "\n",
- "As you see from the source, `mk_msg` doesn't only handle `str` for `content`, but can also deal with a `dict` or an object containing `content` (such as for from assistant responses)."
+ "This makes it easier to grab the needed parts of Claude's responses, which can include multiple pieces of content. By default, we look for the first text block. That will generally have the content we want to display."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "28542783",
+ "id": "37450446",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "{'role': 'user', 'content': \"I'm Jeremy\"}"
+ "TextBlock(text=\"It's nice to meet you, Jeremy! I'm Claude, an AI assistant. How can I help you today?\", type='text')"
]
},
"execution_count": null,
@@ -271,21 +276,42 @@
}
],
"source": [
- "prompt = \"I'm Jeremy\"\n",
- "m = mk_msg(prompt)\n",
- "m"
+ "find_block(r)"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "9ba165ff",
+ "id": "87d29621",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#| exports\n",
+ "def contents(r):\n",
+ " \"Helper to get the contents from Claude response `r`.\"\n",
+ " blk = find_block(r)\n",
+ " if not blk: blk = r.content[0]\n",
+ " return blk.text.strip() if hasattr(blk,'text') else blk"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "375f5a1f",
+ "metadata": {},
+ "source": [
+ "For display purposes, we often just want to show the text itself."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0c13217b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "Message(id='msg_01JW2U8pDjrbPPqoq9rjqzNv', content=[TextBlock(text=\"It's nice to meet you Jeremy! I'm an AI assistant created by Anthropic. I'm here to help with any questions or tasks you may have. Please let me know how I can be of assistance.\", type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=Usage(input_tokens=10, output_tokens=47))"
+ "\"It's nice to meet you, Jeremy! I'm Claude, an AI assistant. How can I help you today?\""
]
},
"execution_count": null,
@@ -294,41 +320,56 @@
}
],
"source": [
- "r = cli.messages.create(messages=[m], model=model, max_tokens=100)\n",
- "r"
+ "contents(r)"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "5bb8081a",
+ "id": "9779971e",
"metadata": {},
"outputs": [],
"source": [
- "#| exports\n",
- "def find_block(r, blk_type=TextBlock):\n",
- " \"Find the first block of type `blk_type` in `r.content`.\"\n",
- " return first(o for o in r.content if isinstance(o,blk_type))"
+ "@patch\n",
+ "def _repr_html_(self:(ToolsBetaMessage,Message)):\n",
+ " det = '\\n<br>'.join(f'{k}: {v}' for k,v in self.dict().items())\n",
+ " return f\"\"\"{contents(self)}\n",
+ " \"\"\""
]
},
{
"cell_type": "markdown",
- "id": "f9848259",
+ "id": "912999a3",
"metadata": {},
"source": [
- "This makes it easier to grab the needed parts of Claude's responses, which can include multiple pieces of content. By default, we look for the first text block."
+ "Jupyter looks for a `_repr_html_` method in displayed objects; we add this in order to display just the content text, and collapse full details into a hideable section. Note that `patch` is from `fastcore`, and is used to add (or replace) functionality in an existing class. We pass the class(es) that we want to patch as type annotations to `self`. In this case, `_repr_html_` is being added to Anthropic's `ToolsBetaMessage` and `Message` classes, so when we display the message now we just see the contents, and the details are hidden away in a collapsible details block."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "ce8e9ee2",
+ "id": "00560e50",
"metadata": {},
"outputs": [
{
"data": {
+ "text/html": [
+ "It's nice to meet you, Jeremy! I'm Claude, an AI assistant. How can I help you today?\n",
+ "- \n",
+ "id: msg_01GhYT4edo9Zz8qH46xxHK7m
\n",
+ "- content: [{'text': \"It's nice to meet you, Jeremy! I'm Claude, an AI assistant. How can I help you today?\", 'type': 'text'}]
\n",
+ "- model: claude-3-haiku-20240307
\n",
+ "- role: assistant
\n",
+ "- stop_reason: end_turn
\n",
+ "- stop_sequence: None
\n",
+ "- type: message
\n",
+ "- usage: {'input_tokens': 10, 'output_tokens': 27}\n",
+ "
"
+ ],
"text/plain": [
- "TextBlock(text=\"It's nice to meet you Jeremy! I'm an AI assistant created by Anthropic. I'm here to help with any questions or tasks you may have. Please let me know how I can be of assistance.\", type='text')"
+ "Message(id='msg_01GhYT4edo9Zz8qH46xxHK7m', content=[TextBlock(text=\"It's nice to meet you, Jeremy! I'm Claude, an AI assistant. How can I help you today?\", type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=Usage(input_tokens=10, output_tokens=27))"
]
},
"execution_count": null,
@@ -337,42 +378,29 @@
}
],
"source": [
- "find_block(r)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "198371cb",
- "metadata": {},
- "outputs": [],
- "source": [
- "#| exports\n",
- "def contents(r):\n",
- " \"Helper to get the contents from Claude response `r`.\"\n",
- " blk = find_block(r)\n",
- " if not blk: blk = r.content[0]\n",
- " return blk.text.strip() if hasattr(blk,'text') else blk"
+ "r"
]
},
{
"cell_type": "markdown",
- "id": "63230fc3",
+ "id": "db13e368",
"metadata": {},
"source": [
- "For display purposes, we often just want to show the text itself."
+ "One key part of the response is the `usage` key, which tells us how many tokens we used by returning a `Usage` object.\n",
+ "\n",
+ "We'll add some helpers to make things a bit cleaner for creating and formatting these objects."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "b96ea02b",
+ "id": "9f5622a7",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "\"It's nice to meet you Jeremy! I'm an AI assistant created by Anthropic. I'm here to help with any questions or tasks you may have. Please let me know how I can be of assistance.\""
+ "Usage(input_tokens=10, output_tokens=27)"
]
},
"execution_count": null,
@@ -381,44 +409,40 @@
}
],
"source": [
- "contents(r)"
+ "r.usage"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "8cfb240d",
+ "id": "248dd091",
"metadata": {},
"outputs": [],
"source": [
"#| exports\n",
- "def mk_msgs(msgs, **kw):\n",
- " \"Helper to set 'assistant' role on alternate messages.\"\n",
- " if isinstance(msgs,str): msgs=[msgs]\n",
- " return [mk_msg(o, ('user','assistant')[i%2], **kw) for i,o in enumerate(msgs)]"
+ "def usage(inp=0, out=0):\n",
+ " \"Slightly more concise version of `Usage`.\"\n",
+ " return Usage(input_tokens=inp, output_tokens=out)"
]
},
{
"cell_type": "markdown",
- "id": "423b5739",
+ "id": "64312bff",
"metadata": {},
"source": [
- "LLMs, including Claude, don't actually have state, but instead dialogs are created by passing back all previous prompts and responses every time. With Claude, they always alternate *user* and *assistant*. Therefore we create a function to make it easier to build up these dialog lists."
+ "The constructor provided by Anthropic is rather verbose, so we clean it up a bit, using a lowercase version of the name."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "7eef3715",
+ "id": "fc32c256",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "[{'role': 'user', 'content': \"I'm Jeremy\"},\n",
- " {'role': 'assistant',\n",
- " 'content': [TextBlock(text=\"It's nice to meet you Jeremy! I'm an AI assistant created by Anthropic. I'm here to help with any questions or tasks you may have. Please let me know how I can be of assistance.\", type='text')]},\n",
- " {'role': 'user', 'content': 'I forgot my name. Can you remind me please?'}]"
+ "Usage(input_tokens=5, output_tokens=0)"
]
},
"execution_count": null,
@@ -427,28 +451,39 @@
}
],
"source": [
- "msgs = mk_msgs([prompt, r, 'I forgot my name. Can you remind me please?'])\n",
- "msgs"
+ "usage(5)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "74d08f0f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#| exports\n",
+ "@patch(as_prop=True)\n",
+ "def total(self:Usage): return self.input_tokens+self.output_tokens"
]
},
{
"cell_type": "markdown",
- "id": "9ddf95e1",
+ "id": "fdd1acda",
"metadata": {},
"source": [
- "Now, if we pass this list of messages to Claude, the model treats it as a conversation to respond to."
+ "Adding a `total` property to `Usage` makes it easier to see how many tokens we've used up altogether."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "6c464f8b",
+ "id": "5beb6328",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "Message(id='msg_01QU9shSR6KX5knJgzq7rDxf', content=[TextBlock(text=\"I do not actually know your name - you said earlier that your name is Jeremy, but if you've now forgotten that, I don't have any other information about your name. As an AI assistant, I don't have access to personal details about you unless you provide them to me directly. If you're unsure of your own name, I'd suggest trying to recall it or referring to other sources that may have that information.\", type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=Usage(input_tokens=71, output_tokens=90))"
+ "6"
]
},
"execution_count": null,
@@ -457,57 +492,39 @@
}
],
"source": [
- "r = cli.messages.create(messages=msgs, model=model, max_tokens=200)\n",
- "r"
+ "usage(5,1).total"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "7cbc945b",
+ "id": "a0fce972",
"metadata": {},
"outputs": [],
"source": [
+ "#| exports\n",
"@patch\n",
- "def _repr_html_(self:(ToolsBetaMessage,Message)):\n",
- " det = '\\n'.join(f'{k}: {v}' for k,v in self.dict().items())\n",
- " return f\"\"\"{contents(self)}\n",
- " \"\"\""
+ "def __repr__(self:Usage): return f'In: {self.input_tokens}; Out: {self.output_tokens}; Total: {self.total}'"
]
},
{
"cell_type": "markdown",
- "id": "bc20e554",
+ "id": "4a11a109",
"metadata": {},
"source": [
- "Jupyter looks for a `_repr_html_` method in displayed objects; we add this in order to display just the content text, and collapse full details into a hideable section. Note that `patch` is from `fastcore`, and is used to add (or replace) functionality in an existing class."
+ "In Python, patching `__repr__` lets us change how an object is displayed."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "47157bbb",
+ "id": "07ab4de3",
"metadata": {},
"outputs": [
{
"data": {
- "text/html": [
- "I do not actually know your name - you said earlier that your name is Jeremy, but if you've now forgotten that, I don't have any other information about your name. As an AI assistant, I don't have access to personal details about you unless you provide them to me directly. If you're unsure of your own name, I'd suggest trying to recall it or referring to other sources that may have that information.\n",
- "- \n",
- "id: msg_01QU9shSR6KX5knJgzq7rDxf
\n",
- "- content: [{'text': \"I do not actually know your name - you said earlier that your name is Jeremy, but if you've now forgotten that, I don't have any other information about your name. As an AI assistant, I don't have access to personal details about you unless you provide them to me directly. If you're unsure of your own name, I'd suggest trying to recall it or referring to other sources that may have that information.\", 'type': 'text'}]
\n",
- "- model: claude-3-haiku-20240307
\n",
- "- role: assistant
\n",
- "- stop_reason: end_turn
\n",
- "- stop_sequence: None
\n",
- "- type: message
\n",
- "- usage: {'input_tokens': 71, 'output_tokens': 90}\n",
- "
"
- ],
"text/plain": [
- "Message(id='msg_01QU9shSR6KX5knJgzq7rDxf', content=[TextBlock(text=\"I do not actually know your name - you said earlier that your name is Jeremy, but if you've now forgotten that, I don't have any other information about your name. As an AI assistant, I don't have access to personal details about you unless you provide them to me directly. If you're unsure of your own name, I'd suggest trying to recall it or referring to other sources that may have that information.\", type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=Usage(input_tokens=71, output_tokens=90))"
+ "In: 10; Out: 27; Total: 37"
]
},
"execution_count": null,
@@ -516,27 +533,40 @@
}
],
"source": [
- "r"
+ "r.usage"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d4d7d33e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#| exports\n",
+ "@patch\n",
+ "def __add__(self:Usage, b):\n",
+ " return usage(self.input_tokens+b.input_tokens, self.output_tokens+b.output_tokens)"
]
},
{
"cell_type": "markdown",
- "id": "d1b7014a",
+ "id": "58dab055",
"metadata": {},
"source": [
- "One key part of the response is the `usage` key, which tells us how many tokens we used:"
+ "And, patching `__add__` let's make `+` work on a class."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "cb147d5d",
+ "id": "c0e977d8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "Usage(input_tokens=71, output_tokens=90)"
+ "In: 20; Out: 54; Total: 74"
]
},
"execution_count": null,
@@ -545,40 +575,66 @@
}
],
"source": [
- "r.usage"
+ "r.usage+r.usage"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d5308d92",
+ "metadata": {},
+ "source": [
+ "### Creating messages"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "83176107",
+ "metadata": {},
+ "source": [
+ "Creating correctly formatted `dict`s from scratch every time isn't very handy, so next up we'll add helpers for this."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "6a8e0ed0",
+ "id": "4f3e4de9",
"metadata": {},
"outputs": [],
"source": [
- "#| exports\n",
- "def usage(inp=0, out=0):\n",
- " \"Slightly more concise version of `Usage`.\"\n",
- " return Usage(input_tokens=inp, output_tokens=out)"
+ "def mk_msg(content, role='user', **kw):\n",
+ " return dict(role=role, content=content, **kw)"
]
},
{
"cell_type": "markdown",
- "id": "4f30bcd7",
+ "id": "1a76545a",
"metadata": {},
"source": [
- "The constructor provided by Anthropic is rather verbose, so we clean it up a bit, using a lowercase version of the name."
+ "We make things a bit more convenient by writing a function to create a message for us."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1a1a2c7a",
+ "metadata": {},
+ "source": [
+ ":::{.callout-note}\n",
+ "\n",
+ "You may have noticed that we didn't export the `mk_msg` function (i.e. there's no \"Exported source\" block around it). That's because we'll need more functionality in our final version than this version has -- so we'll be defining a more complete version later. Rather than refactoring/editing in notebooks, often it's helpful to simply gradually build up complexity by re-defining a symbol.\n",
+ "\n",
+ ":::"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "92d91f42",
+ "id": "28542783",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "Usage(input_tokens=5, output_tokens=0)"
+ "{'role': 'user', 'content': \"I'm Jeremy\"}"
]
},
"execution_count": null,
@@ -587,39 +643,34 @@
}
],
"source": [
- "usage(5)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "209ca17f",
- "metadata": {},
- "outputs": [],
- "source": [
- "#| exports\n",
- "@patch(as_prop=True)\n",
- "def total(self:Usage): return self.input_tokens+self.output_tokens"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "98e0f698",
- "metadata": {},
- "source": [
- "Adding a `total` property to `Usage` makes it easier to see how many tokens we've used up altogether."
+ "prompt = \"I'm Jeremy\"\n",
+ "m = mk_msg(prompt)\n",
+ "m"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "65954f4d",
+ "id": "9ba165ff",
"metadata": {},
"outputs": [
{
"data": {
+ "text/html": [
+ "Nice to meet you Jeremy! How can I assist you today?\n",
+ "- \n",
+ "id: msg_01R89aKPWo6Y8E9bASxs8FDW
\n",
+ "- content: [{'text': 'Nice to meet you Jeremy! How can I assist you today?', 'type': 'text'}]
\n",
+ "- model: claude-3-haiku-20240307
\n",
+ "- role: assistant
\n",
+ "- stop_reason: end_turn
\n",
+ "- stop_sequence: None
\n",
+ "- type: message
\n",
+ "- usage: {'input_tokens': 10, 'output_tokens': 16}\n",
+ "
"
+ ],
"text/plain": [
- "6"
+ "Message(id='msg_01R89aKPWo6Y8E9bASxs8FDW', content=[TextBlock(text='Nice to meet you Jeremy! How can I assist you today?', type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=In: 10; Out: 16; Total: 26)"
]
},
"execution_count": null,
@@ -628,39 +679,74 @@
}
],
"source": [
- "usage(5,1).total"
+ "r = cli.messages.create(messages=[m], model=model, max_tokens=100)\n",
+ "r"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "8d8c9e2f",
+ "id": "8cfb240d",
"metadata": {},
"outputs": [],
"source": [
"#| exports\n",
- "@patch\n",
- "def __repr__(self:Usage): return f'In: {self.input_tokens}; Out: {self.output_tokens}; Total: {self.total}'"
+ "def mk_msgs(msgs, **kw):\n",
+ " \"Helper to set 'assistant' role on alternate messages.\"\n",
+ " if isinstance(msgs,str): msgs=[msgs]\n",
+ " return [mk_msg(o, ('user','assistant')[i%2], **kw) for i,o in enumerate(msgs)]"
]
},
{
"cell_type": "markdown",
- "id": "abf2e0ae",
+ "id": "423b5739",
"metadata": {},
"source": [
- "In python, patching `__repr__` let's us change how an object is displayed."
+ "LLMs, including Claude, don't actually have state, but instead dialogs are created by passing back all previous prompts and responses every time. With Claude, they always alternate *user* and *assistant*. Therefore we create a function to make it easier to build up these dialog lists.\n",
+ "\n",
+ "But to do so, we need to update `mk_msg` so that we can not only pass a `str` as `content`, but can also pass a `dict` or an object with a `content` attr, since these are both types of message that Claude can create. To do so, we check for a `content` key or attr, and use it if found."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "533e8959",
+ "id": "493f69fe",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#| exports\n",
+ "def mk_msg(content, role='user', **kw):\n",
+ " \"Helper to create a `dict` appropriate for a Claude message.\"\n",
+ " if hasattr(content, 'content'): content,role = content.content,content.role\n",
+ " if isinstance(content, abc.Mapping): content=content['content']\n",
+ " return dict(role=role, content=content, **kw)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e8a209d6",
+ "metadata": {},
+ "source": [
+ ":::{.callout-note}\n",
+ "\n",
+ "As mentioned above, we've now re-defined `mk_msg`, and this version is the one we export to the Python module.\n",
+ "\n",
+ ":::"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7eef3715",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "In: 71; Out: 90; Total: 161"
+ "[{'role': 'user', 'content': \"I'm Jeremy\"},\n",
+ " {'role': 'assistant',\n",
+ " 'content': [TextBlock(text='Nice to meet you Jeremy! How can I assist you today?', type='text')]},\n",
+ " {'role': 'user', 'content': 'I forgot my name. Can you remind me please?'}]"
]
},
"execution_count": null,
@@ -669,40 +755,41 @@
}
],
"source": [
- "r.usage"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "63290a10",
- "metadata": {},
- "outputs": [],
- "source": [
- "#| exports\n",
- "@patch\n",
- "def __add__(self:Usage, b):\n",
- " return usage(self.input_tokens+b.input_tokens, self.output_tokens+b.output_tokens)"
+ "msgs = mk_msgs([prompt, r, 'I forgot my name. Can you remind me please?'])\n",
+ "msgs"
]
},
{
"cell_type": "markdown",
- "id": "12764e60",
+ "id": "9ddf95e1",
"metadata": {},
"source": [
- "And, patching `__add__` let's make `+` work on a class."
+ "Now, if we pass this list of messages to Claude, the model treats it as a conversation to respond to."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "e5135499",
+ "id": "6c464f8b",
"metadata": {},
"outputs": [
{
"data": {
+ "text/html": [
+ "I'm afraid I don't actually know your name. When you introduced yourself at the beginning of our conversation, you said your name was Jeremy. If you've forgotten your own name, I don't have any other information to remind you of what it is. I'm an AI assistant, I don't have personal knowledge about you beyond what you've directly told me.\n",
+ "- \n",
+ "id: msg_01CPXhjGbB6ARuz932K2uuuz
\n",
+ "- content: [{'text': \"I'm afraid I don't actually know your name. When you introduced yourself at the beginning of our conversation, you said your name was Jeremy. If you've forgotten your own name, I don't have any other information to remind you of what it is. I'm an AI assistant, I don't have personal knowledge about you beyond what you've directly told me.\", 'type': 'text'}]
\n",
+ "- model: claude-3-haiku-20240307
\n",
+ "- role: assistant
\n",
+ "- stop_reason: end_turn
\n",
+ "- stop_sequence: None
\n",
+ "- type: message
\n",
+ "- usage: {'input_tokens': 40, 'output_tokens': 77}\n",
+ "
"
+ ],
"text/plain": [
- "In: 142; Out: 180; Total: 322"
+ "Message(id='msg_01CPXhjGbB6ARuz932K2uuuz', content=[TextBlock(text=\"I'm afraid I don't actually know your name. When you introduced yourself at the beginning of our conversation, you said your name was Jeremy. If you've forgotten your own name, I don't have any other information to remind you of what it is. I'm an AI assistant, I don't have personal knowledge about you beyond what you've directly told me.\", type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=In: 40; Out: 77; Total: 117)"
]
},
"execution_count": null,
@@ -711,7 +798,8 @@
}
],
"source": [
- "r.usage+r.usage"
+ "r = cli.messages.create(messages=msgs, model=model, max_tokens=200)\n",
+ "r"
]
},
{
@@ -802,7 +890,7 @@
{
"data": {
"text/plain": [
- "In: 71; Out: 90; Total: 161"
+ "In: 40; Out: 77; Total: 117"
]
},
"execution_count": null,
@@ -850,7 +938,7 @@
"text/html": [
"Hello! How can I assist you today?\n",
"- \n",
- "id: msg_01XKoCB5Zg82ntkA6CHQ5Gve
\n",
+ "id: msg_01TN6YyHseRApC1tE6EJS3A9\n",
"- content: [{'text': 'Hello! How can I assist you today?', 'type': 'text'}]
\n",
"- model: claude-3-haiku-20240307
\n",
"- role: assistant
\n",
@@ -861,7 +949,7 @@
"
"
],
"text/plain": [
- "ToolsBetaMessage(id='msg_01XKoCB5Zg82ntkA6CHQ5Gve', content=[TextBlock(text='Hello! How can I assist you today?', type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=In: 8; Out: 12; Total: 20)"
+ "ToolsBetaMessage(id='msg_01TN6YyHseRApC1tE6EJS3A9', content=[TextBlock(text='Hello! How can I assist you today?', type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=In: 8; Out: 12; Total: 20)"
]
},
"execution_count": null,
@@ -882,7 +970,7 @@
{
"data": {
"text/plain": [
- "In: 79; Out: 102; Total: 181"
+ "In: 48; Out: 89; Total: 137"
]
},
"execution_count": null,
@@ -946,7 +1034,7 @@
{
"data": {
"text/plain": [
- "In: 87; Out: 114; Total: 201"
+ "In: 56; Out: 101; Total: 157"
]
},
"execution_count": null,
@@ -971,14 +1059,8 @@
"id": "7ec35c95",
"metadata": {},
"source": [
- "[Tool use](https://docs.anthropic.com/claude/docs/tool-use) lets Claude use external tools. The API uses JSON schema, so we need a way to map Python functions to JSON."
- ]
- },
- {
- "cell_type": "markdown",
- "id": "78065472",
- "metadata": {},
- "source": [
+ "[Tool use](https://docs.anthropic.com/claude/docs/tool-use) lets Claude use external tools. The API uses JSON schema, so we need a way to map Python functions to JSON.\n",
+ "\n",
"We'll use [docments](https://fastcore.fast.ai/docments.html) to make defining Python functions as ergonomic as possible. Each parameter (and the return value) should have a type, and a docments comment with the description of what it is. Here's an example:"
]
},
@@ -1216,7 +1298,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "Adds a + b\n",
+ "Adds a + b.\n",
"\n",
"Returns:\n",
"- type: integer\n",
@@ -1307,10 +1389,10 @@
{
"data": {
"text/html": [
- "ToolUseBlock(id='toolu_01GZCfB1mAG58UMgv2Vosv9r', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')\n",
+ "ToolUseBlock(id='toolu_01GsWMNuccsQbWgf5uWuprUA', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')\n",
"- \n",
- "id: msg_01CNbyP4QHZcmrz315nEFHJc
\n",
- "- content: [{'id': 'toolu_01GZCfB1mAG58UMgv2Vosv9r', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}]
\n",
+ "id: msg_01LeVbuRH8UBbB86bGkVW8cZ\n",
+ "- content: [{'id': 'toolu_01GsWMNuccsQbWgf5uWuprUA', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}]
\n",
"- model: claude-3-haiku-20240307
\n",
"- role: assistant
\n",
"- stop_reason: tool_use
\n",
@@ -1320,7 +1402,7 @@
"
"
],
"text/plain": [
- "ToolsBetaMessage(id='msg_01CNbyP4QHZcmrz315nEFHJc', content=[ToolUseBlock(id='toolu_01GZCfB1mAG58UMgv2Vosv9r', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')], model='claude-3-haiku-20240307', role='assistant', stop_reason='tool_use', stop_sequence=None, type='message', usage=In: 414; Out: 72; Total: 486)"
+ "ToolsBetaMessage(id='msg_01LeVbuRH8UBbB86bGkVW8cZ', content=[ToolUseBlock(id='toolu_01GsWMNuccsQbWgf5uWuprUA', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')], model='claude-3-haiku-20240307', role='assistant', stop_reason='tool_use', stop_sequence=None, type='message', usage=In: 414; Out: 72; Total: 486)"
]
},
"execution_count": null,
@@ -1480,7 +1562,7 @@
"text/plain": [
"{'role': 'user',\n",
" 'content': [{'type': 'tool_result',\n",
- " 'tool_use_id': 'toolu_01GZCfB1mAG58UMgv2Vosv9r',\n",
+ " 'tool_use_id': 'toolu_01GsWMNuccsQbWgf5uWuprUA',\n",
" 'content': '7063474'}]}"
]
},
@@ -1675,8 +1757,8 @@
"text/html": [
"Okay, let me calculate that for you:\n",
"- \n",
- "id: msg_01BuonV8LEsPgVGxQcnKtwek
\n",
- "- content: [{'text': 'Okay, let me calculate that for you:', 'type': 'text'}, {'id': 'toolu_018q7WHBsxxyzLWPmR9THBHN', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}]
\n",
+ "id: msg_013AfSS2Rv1ADeskdMKJXHRS\n",
+ "- content: [{'text': 'Okay, let me calculate that for you:', 'type': 'text'}, {'id': 'toolu_013LuckApSxxrYwCDvych8rs', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}]
\n",
"- model: claude-3-haiku-20240307
\n",
"- role: assistant
\n",
"- stop_reason: tool_use
\n",
@@ -1686,7 +1768,7 @@
"
"
],
"text/plain": [
- "ToolsBetaMessage(id='msg_01BuonV8LEsPgVGxQcnKtwek', content=[TextBlock(text='Okay, let me calculate that for you:', type='text'), ToolUseBlock(id='toolu_018q7WHBsxxyzLWPmR9THBHN', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')], model='claude-3-haiku-20240307', role='assistant', stop_reason='tool_use', stop_sequence=None, type='message', usage=In: 493; Out: 83; Total: 576)"
+ "ToolsBetaMessage(id='msg_013AfSS2Rv1ADeskdMKJXHRS', content=[TextBlock(text='Okay, let me calculate that for you:', type='text'), ToolUseBlock(id='toolu_013LuckApSxxrYwCDvych8rs', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')], model='claude-3-haiku-20240307', role='assistant', stop_reason='tool_use', stop_sequence=None, type='message', usage=In: 493; Out: 83; Total: 576)"
]
},
"execution_count": null,
@@ -1710,7 +1792,7 @@
"text/html": [
"The sum of 604,542 and 6,458,932 is 7,063,474.\n",
"- \n",
- "id: msg_013dUcWHJhbDVRThcFomcjsa
\n",
+ "id: msg_01Ldm99HKbMrVqg9zhM6h7tM\n",
"- content: [{'text': 'The sum of 604,542 and 6,458,932 is 7,063,474.', 'type': 'text'}]
\n",
"- model: claude-3-haiku-20240307
\n",
"- role: assistant
\n",
@@ -1721,7 +1803,7 @@
"
"
],
"text/plain": [
- "ToolsBetaMessage(id='msg_013dUcWHJhbDVRThcFomcjsa', content=[TextBlock(text='The sum of 604,542 and 6,458,932 is 7,063,474.', type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=In: 590; Out: 28; Total: 618)"
+ "ToolsBetaMessage(id='msg_01Ldm99HKbMrVqg9zhM6h7tM', content=[TextBlock(text='The sum of 604,542 and 6,458,932 is 7,063,474.', type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=In: 590; Out: 28; Total: 618)"
]
},
"execution_count": null,
@@ -1770,6 +1852,14 @@
"## XML helpers"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "db4b3f2b",
+ "metadata": {},
+ "source": [
+ "TODO: Document this bit."
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
diff --git a/claudio/core.py b/claudio/core.py
index e54c580..6b7c8df 100644
--- a/claudio/core.py
+++ b/claudio/core.py
@@ -1,7 +1,7 @@
# AUTOGENERATED! DO NOT EDIT! File to edit: ../00_core.ipynb.
# %% auto 0
-__all__ = ['models', 'empty', 'g', 'tags', 'mk_msg', 'find_block', 'contents', 'mk_msgs', 'usage', 'Client', 'get_schema',
+__all__ = ['models', 'empty', 'g', 'tags', 'find_block', 'contents', 'usage', 'mk_msgs', 'mk_msg', 'Client', 'get_schema',
'mk_ns', 'call_func', 'mk_toolres', 'Chat', 'hl_md', 'to_xml', 'xt', 'json_to_xml']
# %% ../00_core.ipynb 6
@@ -26,57 +26,57 @@
# %% ../00_core.ipynb 10
empty = Parameter.empty
-# %% ../00_core.ipynb 20
-def mk_msg(content, role='user', **kw):
- "Helper to create a `dict` appropriate for a Claude message."
- if hasattr(content, 'content'): content,role = content.content,content.role
- if isinstance(content, abc.Mapping): content=content['content']
- return dict(role=role, content=content, **kw)
-
-# %% ../00_core.ipynb 24
+# %% ../00_core.ipynb 21
def find_block(r, blk_type=TextBlock):
"Find the first block of type `blk_type` in `r.content`."
return first(o for o in r.content if isinstance(o,blk_type))
-# %% ../00_core.ipynb 27
+# %% ../00_core.ipynb 24
def contents(r):
"Helper to get the contents from Claude response `r`."
blk = find_block(r)
if not blk: blk = r.content[0]
return blk.text.strip() if hasattr(blk,'text') else blk
-# %% ../00_core.ipynb 30
-def mk_msgs(msgs, **kw):
- "Helper to set 'assistant' role on alternate messages."
- if isinstance(msgs,str): msgs=[msgs]
- return [mk_msg(o, ('user','assistant')[i%2], **kw) for i,o in enumerate(msgs)]
-
-# %% ../00_core.ipynb 40
+# %% ../00_core.ipynb 32
def usage(inp=0, out=0):
"Slightly more concise version of `Usage`."
return Usage(input_tokens=inp, output_tokens=out)
-# %% ../00_core.ipynb 43
+# %% ../00_core.ipynb 35
@patch(as_prop=True)
def total(self:Usage): return self.input_tokens+self.output_tokens
-# %% ../00_core.ipynb 46
+# %% ../00_core.ipynb 38
@patch
def __repr__(self:Usage): return f'In: {self.input_tokens}; Out: {self.output_tokens}; Total: {self.total}'
-# %% ../00_core.ipynb 49
+# %% ../00_core.ipynb 41
@patch
def __add__(self:Usage, b):
return usage(self.input_tokens+b.input_tokens, self.output_tokens+b.output_tokens)
+# %% ../00_core.ipynb 51
+def mk_msgs(msgs, **kw):
+ "Helper to set 'assistant' role on alternate messages."
+ if isinstance(msgs,str): msgs=[msgs]
+ return [mk_msg(o, ('user','assistant')[i%2], **kw) for i,o in enumerate(msgs)]
+
# %% ../00_core.ipynb 53
+def mk_msg(content, role='user', **kw):
+ "Helper to create a `dict` appropriate for a Claude message."
+ if hasattr(content, 'content'): content,role = content.content,content.role
+ if isinstance(content, abc.Mapping): content=content['content']
+ return dict(role=role, content=content, **kw)
+
+# %% ../00_core.ipynb 59
class Client:
def __init__(self, model, cli=None):
"Basic Anthropic messages client."
self.model,self.use = model,Usage(input_tokens=0,output_tokens=0)
self.c = (cli or Anthropic())
-# %% ../00_core.ipynb 56
+# %% ../00_core.ipynb 62
@patch
def _r(self:Client, r:ToolsBetaMessage):
"Store the result of the message and accrue total usage."
@@ -84,7 +84,7 @@ def _r(self:Client, r:ToolsBetaMessage):
self.use += r.usage
return r
-# %% ../00_core.ipynb 59
+# %% ../00_core.ipynb 65
@patch
def __call__(self:Client, msgs, sp='', temp=0, maxtok=4096, stop=None, **kw):
"Make a call to Claude without streaming."
@@ -92,7 +92,7 @@ def __call__(self:Client, msgs, sp='', temp=0, maxtok=4096, stop=None, **kw):
model=self.model, messages=mk_msgs(msgs), max_tokens=maxtok, system=sp, temperature=temp, stop_sequences=stop, **kw)
return self._r(r)
-# %% ../00_core.ipynb 63
+# %% ../00_core.ipynb 69
@patch
def stream(self:Client, msgs, sp='', temp=0, maxtok=4096, stop=None, **kw):
"Make a call to Claude, streaming the result."
@@ -101,14 +101,14 @@ def stream(self:Client, msgs, sp='', temp=0, maxtok=4096, stop=None, **kw):
yield from s.text_stream
return self._r(s.get_final_message())
-# %% ../00_core.ipynb 75
+# %% ../00_core.ipynb 80
def _types(t:type)->tuple[str,Optional[str]]:
"Tuple of json schema type name and (if appropriate) array item name."
tmap = {int:"integer", float:"number", str:"string", bool:"boolean", list:"array", dict:"object"}
if getattr(t, '__origin__', None) in (list,tuple): return "array", tmap.get(t.__args__[0], "object")
else: return tmap.get(t, "object"), None
-# %% ../00_core.ipynb 78
+# %% ../00_core.ipynb 83
def _param(name, info):
"json schema parameter given `name` and `info` from docments full dict."
paramt,itemt = _types(info.anno)
@@ -117,7 +117,7 @@ def _param(name, info):
if info.default is not empty: pschema["default"] = info.default
return pschema
-# %% ../00_core.ipynb 81
+# %% ../00_core.ipynb 86
def get_schema(f:callable)->dict:
"Convert function `f` into a JSON schema `dict` for tool use."
d = docments(f, full=True)
@@ -132,12 +132,12 @@ def get_schema(f:callable)->dict:
if ret.docment: desc += f'\n- description: {ret.docment}'
return dict(name=f.__name__, description=desc, input_schema=paramd)
-# %% ../00_core.ipynb 92
+# %% ../00_core.ipynb 97
def mk_ns(*funcs:list[callable]) -> dict[str,callable]:
"Create a `dict` of name to function in `funcs`, to use as a namespace"
return {f.__name__:f for f in funcs}
-# %% ../00_core.ipynb 94
+# %% ../00_core.ipynb 99
def call_func(tr, ns=None):
"Call the function in the tool response `tr`, using namespace `ns`."
if ns is None: ns=globals()
@@ -145,7 +145,7 @@ def call_func(tr, ns=None):
fc = find_block(r, tool_use_block.ToolUseBlock)
return ns[fc.name](**fc.input)
-# %% ../00_core.ipynb 97
+# %% ../00_core.ipynb 102
def mk_toolres(r, res=None, ns=None):
"Create a `tool_result` message from response `r`."
if not hasattr(r, 'content'): return r
@@ -155,7 +155,7 @@ def mk_toolres(r, res=None, ns=None):
tr = dict(type="tool_result", tool_use_id=tool.id, content=str(res))
return mk_msg([tr])
-# %% ../00_core.ipynb 104
+# %% ../00_core.ipynb 109
class Chat:
def __init__(self, model=None, cli=None, sp='', tools=None):
"Anthropic chat client."
@@ -163,13 +163,13 @@ def __init__(self, model=None, cli=None, sp='', tools=None):
self.c = (cli or Client(model))
self.h,self.sp,self.tools = [],sp,tools
-# %% ../00_core.ipynb 117
+# %% ../00_core.ipynb 123
def hl_md(s, lang='xml'):
"Syntax highlight `s` using `lang`."
if Markdown: return Markdown(f'```{lang}\n{s}\n```')
print(s)
-# %% ../00_core.ipynb 118
+# %% ../00_core.ipynb 124
def to_xml(node, hl=False):
"Convert `node` to an XML string."
def mk_el(tag, cs, attrs):
@@ -183,18 +183,18 @@ def mk_el(tag, cs, attrs):
res = ET.tostring(root, encoding='unicode')
return hl_md(res) if hl else res
-# %% ../00_core.ipynb 119
+# %% ../00_core.ipynb 125
def xt(tag, c=None, **kw):
"Helper to create appropriate data structure for `to_xml`."
kw = {k.lstrip('_'):str(v) for k,v in kw.items()}
return tag,c,kw
-# %% ../00_core.ipynb 120
+# %% ../00_core.ipynb 126
g = globals()
tags = 'div','img','h1','h2','h3','h4','h5','p','hr','span','html'
for o in tags: g[o] = partial(xt, o)
-# %% ../00_core.ipynb 123
+# %% ../00_core.ipynb 129
def json_to_xml(d:dict, rnm:str)->str:
"Convert `d` to XML with root name `rnm`."
root = ET.Element(rnm)