diff --git a/00_core.ipynb b/00_core.ipynb
index 7502da6..da4446c 100644
--- a/00_core.ipynb
+++ b/00_core.ipynb
@@ -72,8 +72,8 @@
"from anthropic.types.beta.tools import ToolsBetaMessage, tool_use_block\n",
"from inspect import Parameter\n",
"from io import BytesIO\n",
- "try: from IPython.display import Markdown,HTML\n",
- "except: Markdown,HTML=None,None\n",
+ "try: from IPython import display\n",
+ "except: display=None\n",
"\n",
"from fastcore.docments import docments\n",
"from fastcore.utils import *"
@@ -81,7 +81,7 @@
},
{
"cell_type": "markdown",
- "id": "8f326dfa",
+ "id": "5d4d0ec8",
"metadata": {},
"source": [
":::{.callout-tip}\n",
@@ -106,7 +106,7 @@
},
{
"cell_type": "markdown",
- "id": "c7d860dd",
+ "id": "9bdeca1e",
"metadata": {},
"source": [
"These are the current versions of Anthropic's model at the time of writing."
@@ -169,7 +169,7 @@
},
{
"cell_type": "markdown",
- "id": "80b96961",
+ "id": "7b28b175",
"metadata": {},
"source": [
":::{.callout-tip}\n",
@@ -188,7 +188,7 @@
{
"data": {
"text/plain": [
- "Message(id='msg_01GhYT4edo9Zz8qH46xxHK7m', content=[TextBlock(text=\"It's nice to meet you, Jeremy! I'm Claude, an AI assistant. How can I help you today?\", type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=Usage(input_tokens=10, output_tokens=27))"
+ "Message(id='msg_01GqMVsZxfNekkAGksqyC1To', content=[TextBlock(text=\"It's nice to meet you, Jeremy! As an AI assistant, I don't have a personal identity, but I'm happy to chat with you and try my best to assist you with any questions or tasks you may have. Please feel free to ask me anything.\", type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=Usage(input_tokens=10, output_tokens=57))"
]
},
"execution_count": null,
@@ -204,7 +204,7 @@
},
{
"cell_type": "markdown",
- "id": "494287f0",
+ "id": "e5ebf6d8",
"metadata": {},
"source": [
"### Formatting output"
@@ -221,7 +221,7 @@
},
{
"cell_type": "markdown",
- "id": "fa71b71c",
+ "id": "d3c70683",
"metadata": {},
"source": [
":::{.callout-tip}\n",
@@ -240,7 +240,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "2f23a5d8",
+ "id": "94f246a2",
"metadata": {},
"outputs": [],
"source": [
@@ -252,7 +252,7 @@
},
{
"cell_type": "markdown",
- "id": "f7a90432",
+ "id": "b32c9d29",
"metadata": {},
"source": [
"This makes it easier to grab the needed parts of Claude's responses, which can include multiple pieces of content. By default, we look for the first text block. That will generally have the content we want to display."
@@ -261,13 +261,13 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "37450446",
+ "id": "0692e934",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "TextBlock(text=\"It's nice to meet you, Jeremy! I'm Claude, an AI assistant. How can I help you today?\", type='text')"
+ "TextBlock(text=\"It's nice to meet you, Jeremy! As an AI assistant, I don't have a personal identity, but I'm happy to chat with you and try my best to assist you with any questions or tasks you may have. Please feel free to ask me anything.\", type='text')"
]
},
"execution_count": null,
@@ -282,7 +282,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "87d29621",
+ "id": "38c3c8c3",
"metadata": {},
"outputs": [],
"source": [
@@ -296,7 +296,7 @@
},
{
"cell_type": "markdown",
- "id": "375f5a1f",
+ "id": "21ace55a",
"metadata": {},
"source": [
"For display purposes, we often just want to show the text itself."
@@ -305,13 +305,13 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "0c13217b",
+ "id": "bf5b8704",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "\"It's nice to meet you, Jeremy! I'm Claude, an AI assistant. How can I help you today?\""
+ "\"It's nice to meet you, Jeremy! As an AI assistant, I don't have a personal identity, but I'm happy to chat with you and try my best to assist you with any questions or tasks you may have. Please feel free to ask me anything.\""
]
},
"execution_count": null,
@@ -326,50 +326,56 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "9779971e",
+ "id": "f80bfdd8",
"metadata": {},
"outputs": [],
"source": [
"@patch\n",
- "def _repr_html_(self:(ToolsBetaMessage,Message)):\n",
- " det = '\\n
'.join(f'{k}: {v}' for k,v in self.dict().items())\n",
+ "def _repr_markdown_(self:(ToolsBetaMessage,Message)):\n",
+ " det = '\\n- '.join(f'{k}: {v}' for k,v in self.dict().items())\n",
" return f\"\"\"{contents(self)}\n",
- "- \n",
+ "\n",
+ "
\n",
+ "\n",
"{det}\n",
- "
\"\"\""
+ "\n",
+ "\"\"\""
]
},
{
"cell_type": "markdown",
- "id": "912999a3",
+ "id": "e10da614",
"metadata": {},
"source": [
- "Jupyter looks for a `_repr_html_` method in displayed objects; we add this in order to display just the content text, and collapse full details into a hideable section. Note that `patch` is from `fastcore`, and is used to add (or replace) functionality in an existing class. We pass the class(es) that we want to patch as type annotations to `self`. In this case, `_repr_html_` is being added to Anthropic's `ToolsBetaMessage` and `Message` classes, so when we display the message now we just see the contents, and the details are hidden away in a collapsible details block."
+ "Jupyter looks for a `_repr_markdown_` method in displayed objects; we add this in order to display just the content text, and collapse full details into a hideable section. Note that `patch` is from `fastcore`, and is used to add (or replace) functionality in an existing class. We pass the class(es) that we want to patch as type annotations to `self`. In this case, `_repr_markdown_` is being added to Anthropic's `ToolsBetaMessage` and `Message` classes, so when we display the message now we just see the contents, and the details are hidden away in a collapsible details block."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "00560e50",
+ "id": "43eb7b93",
"metadata": {},
"outputs": [
{
"data": {
- "text/html": [
- "It's nice to meet you, Jeremy! I'm Claude, an AI assistant. How can I help you today?\n",
- "- \n",
- "id: msg_01GhYT4edo9Zz8qH46xxHK7m
\n",
- "- content: [{'text': \"It's nice to meet you, Jeremy! I'm Claude, an AI assistant. How can I help you today?\", 'type': 'text'}]
\n",
- "- model: claude-3-haiku-20240307
\n",
- "- role: assistant
\n",
- "- stop_reason: end_turn
\n",
- "- stop_sequence: None
\n",
- "- type: message
\n",
- "- usage: {'input_tokens': 10, 'output_tokens': 27}\n",
- "
"
+ "text/markdown": [
+ "It's nice to meet you, Jeremy! As an AI assistant, I don't have a personal identity, but I'm happy to chat with you and try my best to assist you with any questions or tasks you may have. Please feel free to ask me anything.\n",
+ "\n",
+ "\n",
+ "\n",
+ "id: msg_01GqMVsZxfNekkAGksqyC1To\n",
+ "- content: [{'text': \"It's nice to meet you, Jeremy! As an AI assistant, I don't have a personal identity, but I'm happy to chat with you and try my best to assist you with any questions or tasks you may have. Please feel free to ask me anything.\", 'type': 'text'}]\n",
+ "- model: claude-3-haiku-20240307\n",
+ "- role: assistant\n",
+ "- stop_reason: end_turn\n",
+ "- stop_sequence: None\n",
+ "- type: message\n",
+ "- usage: {'input_tokens': 10, 'output_tokens': 57}\n",
+ "\n",
+ " "
],
"text/plain": [
- "Message(id='msg_01GhYT4edo9Zz8qH46xxHK7m', content=[TextBlock(text=\"It's nice to meet you, Jeremy! I'm Claude, an AI assistant. How can I help you today?\", type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=Usage(input_tokens=10, output_tokens=27))"
+ "Message(id='msg_01GqMVsZxfNekkAGksqyC1To', content=[TextBlock(text=\"It's nice to meet you, Jeremy! As an AI assistant, I don't have a personal identity, but I'm happy to chat with you and try my best to assist you with any questions or tasks you may have. Please feel free to ask me anything.\", type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=Usage(input_tokens=10, output_tokens=57))"
]
},
"execution_count": null,
@@ -383,7 +389,7 @@
},
{
"cell_type": "markdown",
- "id": "db13e368",
+ "id": "1eb3488b",
"metadata": {},
"source": [
"One key part of the response is the `usage` key, which tells us how many tokens we used by returning a `Usage` object.\n",
@@ -394,13 +400,13 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "9f5622a7",
+ "id": "926eb735",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "Usage(input_tokens=10, output_tokens=27)"
+ "Usage(input_tokens=10, output_tokens=57)"
]
},
"execution_count": null,
@@ -415,7 +421,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "248dd091",
+ "id": "2c0cfc6c",
"metadata": {},
"outputs": [],
"source": [
@@ -427,7 +433,7 @@
},
{
"cell_type": "markdown",
- "id": "64312bff",
+ "id": "14040037",
"metadata": {},
"source": [
"The constructor provided by Anthropic is rather verbose, so we clean it up a bit, using a lowercase version of the name."
@@ -436,7 +442,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "fc32c256",
+ "id": "a1b94be9",
"metadata": {},
"outputs": [
{
@@ -457,7 +463,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "74d08f0f",
+ "id": "9fba6f08",
"metadata": {},
"outputs": [],
"source": [
@@ -468,7 +474,7 @@
},
{
"cell_type": "markdown",
- "id": "fdd1acda",
+ "id": "101c38b5",
"metadata": {},
"source": [
"Adding a `total` property to `Usage` makes it easier to see how many tokens we've used up altogether."
@@ -477,7 +483,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "5beb6328",
+ "id": "3bcf68b2",
"metadata": {},
"outputs": [
{
@@ -498,7 +504,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "a0fce972",
+ "id": "b008fb59",
"metadata": {},
"outputs": [],
"source": [
@@ -509,7 +515,7 @@
},
{
"cell_type": "markdown",
- "id": "4a11a109",
+ "id": "9262cbb2",
"metadata": {},
"source": [
"In python, patching `__repr__` let's us change how an object is displayed."
@@ -518,13 +524,13 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "07ab4de3",
+ "id": "6b1761a4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "In: 10; Out: 27; Total: 37"
+ "In: 10; Out: 57; Total: 67"
]
},
"execution_count": null,
@@ -539,7 +545,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "d4d7d33e",
+ "id": "c772a8a3",
"metadata": {},
"outputs": [],
"source": [
@@ -551,7 +557,7 @@
},
{
"cell_type": "markdown",
- "id": "58dab055",
+ "id": "9420dbdd",
"metadata": {},
"source": [
"And, patching `__add__` let's make `+` work on a class."
@@ -560,13 +566,13 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "c0e977d8",
+ "id": "a36ed18c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "In: 20; Out: 54; Total: 74"
+ "In: 20; Out: 114; Total: 134"
]
},
"execution_count": null,
@@ -580,7 +586,7 @@
},
{
"cell_type": "markdown",
- "id": "d5308d92",
+ "id": "ef435b67",
"metadata": {},
"source": [
"### Creating messages"
@@ -588,7 +594,7 @@
},
{
"cell_type": "markdown",
- "id": "83176107",
+ "id": "9deb52ff",
"metadata": {},
"source": [
"Creating correctly formatted `dict`s from scratch every time isn't very handy, so next up we'll add helpers for this."
@@ -615,7 +621,7 @@
},
{
"cell_type": "markdown",
- "id": "1a1a2c7a",
+ "id": "7ed3d284",
"metadata": {},
"source": [
":::{.callout-note}\n",
@@ -656,21 +662,24 @@
"outputs": [
{
"data": {
- "text/html": [
- "Nice to meet you Jeremy! How can I assist you today?\n",
- "- \n",
- "id: msg_01R89aKPWo6Y8E9bASxs8FDW
\n",
- "- content: [{'text': 'Nice to meet you Jeremy! How can I assist you today?', 'type': 'text'}]
\n",
- "- model: claude-3-haiku-20240307
\n",
- "- role: assistant
\n",
- "- stop_reason: end_turn
\n",
- "- stop_sequence: None
\n",
- "- type: message
\n",
- "- usage: {'input_tokens': 10, 'output_tokens': 16}\n",
- "
"
+ "text/markdown": [
+ "It's nice to meet you, Jeremy! How can I assist you today?\n",
+ "\n",
+ "\n",
+ "\n",
+ "id: msg_01WDH5Hegmij4xEwH9ispkrW\n",
+ "- content: [{'text': \"It's nice to meet you, Jeremy! How can I assist you today?\", 'type': 'text'}]\n",
+ "- model: claude-3-haiku-20240307\n",
+ "- role: assistant\n",
+ "- stop_reason: end_turn\n",
+ "- stop_sequence: None\n",
+ "- type: message\n",
+ "- usage: {'input_tokens': 10, 'output_tokens': 19}\n",
+ "\n",
+ " "
],
"text/plain": [
- "Message(id='msg_01R89aKPWo6Y8E9bASxs8FDW', content=[TextBlock(text='Nice to meet you Jeremy! How can I assist you today?', type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=In: 10; Out: 16; Total: 26)"
+ "Message(id='msg_01WDH5Hegmij4xEwH9ispkrW', content=[TextBlock(text=\"It's nice to meet you, Jeremy! How can I assist you today?\", type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=In: 10; Out: 19; Total: 29)"
]
},
"execution_count": null,
@@ -710,7 +719,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "493f69fe",
+ "id": "8d60691e",
"metadata": {},
"outputs": [],
"source": [
@@ -724,7 +733,7 @@
},
{
"cell_type": "markdown",
- "id": "e8a209d6",
+ "id": "cb58b15f",
"metadata": {},
"source": [
":::{.callout-note}\n",
@@ -745,7 +754,7 @@
"text/plain": [
"[{'role': 'user', 'content': \"I'm Jeremy\"},\n",
" {'role': 'assistant',\n",
- " 'content': [TextBlock(text='Nice to meet you Jeremy! How can I assist you today?', type='text')]},\n",
+ " 'content': [TextBlock(text=\"It's nice to meet you, Jeremy! How can I assist you today?\", type='text')]},\n",
" {'role': 'user', 'content': 'I forgot my name. Can you remind me please?'}]"
]
},
@@ -775,21 +784,24 @@
"outputs": [
{
"data": {
- "text/html": [
- "I'm afraid I don't actually know your name. When you introduced yourself at the beginning of our conversation, you said your name was Jeremy. If you've forgotten your own name, I don't have any other information to remind you of what it is. I'm an AI assistant, I don't have personal knowledge about you beyond what you've directly told me.\n",
- "- \n",
- "id: msg_01CPXhjGbB6ARuz932K2uuuz
\n",
- "- content: [{'text': \"I'm afraid I don't actually know your name. When you introduced yourself at the beginning of our conversation, you said your name was Jeremy. If you've forgotten your own name, I don't have any other information to remind you of what it is. I'm an AI assistant, I don't have personal knowledge about you beyond what you've directly told me.\", 'type': 'text'}]
\n",
- "- model: claude-3-haiku-20240307
\n",
- "- role: assistant
\n",
- "- stop_reason: end_turn
\n",
- "- stop_sequence: None
\n",
- "- type: message
\n",
- "- usage: {'input_tokens': 40, 'output_tokens': 77}\n",
- "
"
+ "text/markdown": [
+ "I'm afraid I don't actually know your name. You introduced yourself as Jeremy, but if you've forgotten your name, I don't have any other information about what your name is. I'm an AI assistant created by Anthropic to be helpful, harmless, and honest. I don't have access to your personal information or identity details. If you've genuinely forgotten your name, I would suggest trying to recall it or contacting someone who knows you well to help remind you.\n",
+ "\n",
+ "\n",
+ "\n",
+ "id: msg_01RoqBDVrbD8cwuDYGCEh86F\n",
+ "- content: [{'text': \"I'm afraid I don't actually know your name. You introduced yourself as Jeremy, but if you've forgotten your name, I don't have any other information about what your name is. I'm an AI assistant created by Anthropic to be helpful, harmless, and honest. I don't have access to your personal information or identity details. If you've genuinely forgotten your name, I would suggest trying to recall it or contacting someone who knows you well to help remind you.\", 'type': 'text'}]\n",
+ "- model: claude-3-haiku-20240307\n",
+ "- role: assistant\n",
+ "- stop_reason: end_turn\n",
+ "- stop_sequence: None\n",
+ "- type: message\n",
+ "- usage: {'input_tokens': 43, 'output_tokens': 103}\n",
+ "\n",
+ " "
],
"text/plain": [
- "Message(id='msg_01CPXhjGbB6ARuz932K2uuuz', content=[TextBlock(text=\"I'm afraid I don't actually know your name. When you introduced yourself at the beginning of our conversation, you said your name was Jeremy. If you've forgotten your own name, I don't have any other information to remind you of what it is. I'm an AI assistant, I don't have personal knowledge about you beyond what you've directly told me.\", type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=In: 40; Out: 77; Total: 117)"
+ "Message(id='msg_01RoqBDVrbD8cwuDYGCEh86F', content=[TextBlock(text=\"I'm afraid I don't actually know your name. You introduced yourself as Jeremy, but if you've forgotten your name, I don't have any other information about what your name is. I'm an AI assistant created by Anthropic to be helpful, harmless, and honest. I don't have access to your personal information or identity details. If you've genuinely forgotten your name, I would suggest trying to recall it or contacting someone who knows you well to help remind you.\", type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=In: 43; Out: 103; Total: 146)"
]
},
"execution_count": null,
@@ -798,8 +810,7 @@
}
],
"source": [
- "r = cli.messages.create(messages=msgs, model=model, max_tokens=200)\n",
- "r"
+ "cli.messages.create(messages=msgs, model=model, max_tokens=200)"
]
},
{
@@ -890,7 +901,7 @@
{
"data": {
"text/plain": [
- "In: 40; Out: 77; Total: 117"
+ "In: 10; Out: 19; Total: 29"
]
},
"execution_count": null,
@@ -935,21 +946,24 @@
"outputs": [
{
"data": {
- "text/html": [
+ "text/markdown": [
"Hello! How can I assist you today?\n",
- "- \n",
- "id: msg_01TN6YyHseRApC1tE6EJS3A9
\n",
- "- content: [{'text': 'Hello! How can I assist you today?', 'type': 'text'}]
\n",
- "- model: claude-3-haiku-20240307
\n",
- "- role: assistant
\n",
- "- stop_reason: end_turn
\n",
- "- stop_sequence: None
\n",
- "- type: message
\n",
- "- usage: {'input_tokens': 8, 'output_tokens': 12}\n",
- "
"
+ "\n",
+ "\n",
+ "\n",
+ "id: msg_01TgP2TdWGP68VfHmmrQuSqF\n",
+ "- content: [{'text': 'Hello! How can I assist you today?', 'type': 'text'}]\n",
+ "- model: claude-3-haiku-20240307\n",
+ "- role: assistant\n",
+ "- stop_reason: end_turn\n",
+ "- stop_sequence: None\n",
+ "- type: message\n",
+ "- usage: {'input_tokens': 8, 'output_tokens': 12}\n",
+ "\n",
+ " "
],
"text/plain": [
- "ToolsBetaMessage(id='msg_01TN6YyHseRApC1tE6EJS3A9', content=[TextBlock(text='Hello! How can I assist you today?', type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=In: 8; Out: 12; Total: 20)"
+ "ToolsBetaMessage(id='msg_01TgP2TdWGP68VfHmmrQuSqF', content=[TextBlock(text='Hello! How can I assist you today?', type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=In: 8; Out: 12; Total: 20)"
]
},
"execution_count": null,
@@ -970,7 +984,7 @@
{
"data": {
"text/plain": [
- "In: 48; Out: 89; Total: 137"
+ "In: 18; Out: 31; Total: 49"
]
},
"execution_count": null,
@@ -1034,7 +1048,7 @@
{
"data": {
"text/plain": [
- "In: 56; Out: 101; Total: 157"
+ "In: 26; Out: 43; Total: 69"
]
},
"execution_count": null,
@@ -1388,21 +1402,24 @@
"outputs": [
{
"data": {
- "text/html": [
- "ToolUseBlock(id='toolu_01GsWMNuccsQbWgf5uWuprUA', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')\n",
- "- \n",
- "id: msg_01LeVbuRH8UBbB86bGkVW8cZ
\n",
- "- content: [{'id': 'toolu_01GsWMNuccsQbWgf5uWuprUA', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}]
\n",
- "- model: claude-3-haiku-20240307
\n",
- "- role: assistant
\n",
- "- stop_reason: tool_use
\n",
- "- stop_sequence: None
\n",
- "- type: message
\n",
- "- usage: {'input_tokens': 414, 'output_tokens': 72}\n",
- "
"
+ "text/markdown": [
+ "ToolUseBlock(id='toolu_0193iWxNNL9hMQLefhW8x4xo', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')\n",
+ "\n",
+ "\n",
+ "\n",
+ "id: msg_01XBsa5kDACbbb66SUDNstFJ\n",
+ "- content: [{'id': 'toolu_0193iWxNNL9hMQLefhW8x4xo', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}]\n",
+ "- model: claude-3-haiku-20240307\n",
+ "- role: assistant\n",
+ "- stop_reason: tool_use\n",
+ "- stop_sequence: None\n",
+ "- type: message\n",
+ "- usage: {'input_tokens': 414, 'output_tokens': 72}\n",
+ "\n",
+ " "
],
"text/plain": [
- "ToolsBetaMessage(id='msg_01LeVbuRH8UBbB86bGkVW8cZ', content=[ToolUseBlock(id='toolu_01GsWMNuccsQbWgf5uWuprUA', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')], model='claude-3-haiku-20240307', role='assistant', stop_reason='tool_use', stop_sequence=None, type='message', usage=In: 414; Out: 72; Total: 486)"
+ "ToolsBetaMessage(id='msg_01XBsa5kDACbbb66SUDNstFJ', content=[ToolUseBlock(id='toolu_0193iWxNNL9hMQLefhW8x4xo', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')], model='claude-3-haiku-20240307', role='assistant', stop_reason='tool_use', stop_sequence=None, type='message', usage=In: 414; Out: 72; Total: 486)"
]
},
"execution_count": null,
@@ -1562,7 +1579,7 @@
"text/plain": [
"{'role': 'user',\n",
" 'content': [{'type': 'tool_result',\n",
- " 'tool_use_id': 'toolu_01GsWMNuccsQbWgf5uWuprUA',\n",
+ " 'tool_use_id': 'toolu_0193iWxNNL9hMQLefhW8x4xo',\n",
" 'content': '7063474'}]}"
]
},
@@ -1665,23 +1682,49 @@
],
"source": [
"sp = \"If asked to add things up, use the `sums` function instead of doing it yourself. Never mention what tools you use.\"\n",
- "chat = Chat(model, sp=sp, tools=[sums])\n",
+ "chat = Chat(model, sp=sp)\n",
"chat.c.use, chat.h"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "73ef72d9",
+ "id": "24aad233",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#| exports\n",
+ "def _add_prefill(prefill, r):\n",
+ " \"Add `prefill` to the start of response `r`, since Claude doesn't include it otherwise\"\n",
+ " if not prefill: return\n",
+ " blk = find_block(r)\n",
+ " blk.text = prefill + blk.text"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "22a25253",
+ "metadata": {},
+ "source": [
+ "Claude supports adding an extra `assistant` message at the end, which contains the *prefill* -- i.e. the text we want Claude to assume the response starts with. However Claude doesn't actually repeat that in the response, so for convenience we'll add it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b2f5eef7",
"metadata": {},
"outputs": [],
"source": [
+ "#| exports\n",
"@patch\n",
- "def __call__(self:Chat, pr, temp=0, maxtok=4096, stop=None, ns=None, **kw):\n",
+ "def __call__(self:Chat, pr, temp=0, maxtok=4096, stop=None, ns=None, prefill='', **kw):\n",
" if ns is None: ns=self.tools\n",
- " self.h = mk_msgs(self.h + [mk_toolres(pr, ns=ns)])\n",
+ " if isinstance(pr,str): pr = pr.strip()\n",
+ " self.h.append(mk_toolres(pr, ns=ns))\n",
" if self.tools: kw['tools'] = [get_schema(o) for o in self.tools]\n",
- " res = self.c(self.h, sp=self.sp, temp=temp, maxtok=maxtok, stop=stop, **kw)\n",
+ " res = self.c(self.h + ([prefill.strip()] if prefill else []), sp=self.sp, temp=temp, maxtok=maxtok, stop=stop, **kw)\n",
+ " _add_prefill(prefill, res)\n",
" self.h.append(mk_msg(res, role='assistant'))\n",
" return res"
]
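For reference, this is what prefill corresponds to at the raw API level: the final `assistant` message is treated as the start of Claude's reply, and the response continues from it without repeating it, which is why `_add_prefill` glues it back on. A sketch using the `cli` and `model` defined earlier in the notebook (the reply wording will vary):

```python
msgs = [{'role': 'user', 'content': 'Concisely, what is the meaning of life?'},
        {'role': 'assistant', 'content': 'According to Douglas Adams,'}]
r = cli.messages.create(messages=msgs, model=model, max_tokens=50)
'According to Douglas Adams,' + contents(r)  # prepend the prefill ourselves
```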
@@ -1703,7 +1746,7 @@
{
"data": {
"text/plain": [
- "'Your name is Jeremy, as you told me earlier.'"
+ "'Your name is Jeremy.'"
]
},
"execution_count": null,
@@ -1716,6 +1759,126 @@
"contents(chat(\"What's my name?\"))"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "1dbd0186",
+ "metadata": {},
+ "source": [
+ "Let's try out prefill too:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0f7828c9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "q = \"Concisely, what is the meaning of life?\"\n",
+ "pref = 'According to Douglas Adams,'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f6c6b93e",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/markdown": [
+ "According to Douglas Adams, the answer is 42.\n",
+ "\n",
+ "\n",
+ "\n",
+ "id: msg_01VkMDbaEybi1Xz52Rsoz3Jr\n",
+ "- content: [{'text': 'According to Douglas Adams, the answer is 42.', 'type': 'text'}]\n",
+ "- model: claude-3-haiku-20240307\n",
+ "- role: assistant\n",
+ "- stop_reason: end_turn\n",
+ "- stop_sequence: None\n",
+ "- type: message\n",
+ "- usage: {'input_tokens': 124, 'output_tokens': 10}\n",
+ "\n",
+ " "
+ ],
+ "text/plain": [
+ "ToolsBetaMessage(id='msg_01VkMDbaEybi1Xz52Rsoz3Jr', content=[TextBlock(text='According to Douglas Adams, the answer is 42.', type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=In: 124; Out: 10; Total: 134)"
+ ]
+ },
+ "execution_count": null,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "chat(q, prefill=pref)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "aee372cc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#| exports\n",
+ "@patch\n",
+ "def stream(self:Chat, pr, temp=0, maxtok=4096, stop=None, prefill='', **kw):\n",
+ " \"Add a prompt and get a response from the chat dialog, streaming the result\"\n",
+ " if isinstance(pr,str): pr = pr.strip()\n",
+ " self.h.append(pr)\n",
+ " if prefill: yield(prefill)\n",
+ " yield from self.c.stream(self.h + ([prefill.strip()] if prefill else []), sp=self.sp, temp=temp, maxtok=maxtok, stop=stop, **kw)\n",
+ " _add_prefill(prefill, self.c.result)\n",
+ " self.h.append(mk_msg(self.c.result, role='assistant'))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d3aee23c",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "It's nice to meet you, Jeremy! I'm an AI assistant created by Anthropic. I'm here to help with any tasks or questions you may have. Please let me know if there's anything I can assist you with."
+ ]
+ }
+ ],
+ "source": [
+ "chat = Chat(model, sp=sp)\n",
+ "for o in chat.stream(\"I'm Jeremy\"): print(o, end='')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "aa466638",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "According to Douglas Adams, the answer is 42."
+ ]
+ }
+ ],
+ "source": [
+ "for o in chat.stream(q, prefill=pref): print(o, end='')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "eac28cde",
+ "metadata": {},
+ "source": [
+ "### Chat tool use"
+ ]
+ },
{
"cell_type": "markdown",
"id": "062f7edf",
@@ -1754,21 +1917,24 @@
"outputs": [
{
"data": {
- "text/html": [
- "Okay, let me calculate that for you:\n",
- "- \n",
- "id: msg_013AfSS2Rv1ADeskdMKJXHRS
\n",
- "- content: [{'text': 'Okay, let me calculate that for you:', 'type': 'text'}, {'id': 'toolu_013LuckApSxxrYwCDvych8rs', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}]
\n",
- "- model: claude-3-haiku-20240307
\n",
- "- role: assistant
\n",
- "- stop_reason: tool_use
\n",
- "- stop_sequence: None
\n",
- "- type: message
\n",
- "- usage: {'input_tokens': 493, 'output_tokens': 83}\n",
- "
"
+ "text/markdown": [
+ "ToolUseBlock(id='toolu_01Kg1appFFPkxHMvUXewgBky', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')\n",
+ "\n",
+ "\n",
+ "\n",
+ "id: msg_01Uv8ujpvg7wYsoSvc9hBoTd\n",
+ "- content: [{'id': 'toolu_01Kg1appFFPkxHMvUXewgBky', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}]\n",
+ "- model: claude-3-haiku-20240307\n",
+ "- role: assistant\n",
+ "- stop_reason: tool_use\n",
+ "- stop_sequence: None\n",
+ "- type: message\n",
+ "- usage: {'input_tokens': 418, 'output_tokens': 72}\n",
+ "\n",
+ " "
],
"text/plain": [
- "ToolsBetaMessage(id='msg_013AfSS2Rv1ADeskdMKJXHRS', content=[TextBlock(text='Okay, let me calculate that for you:', type='text'), ToolUseBlock(id='toolu_013LuckApSxxrYwCDvych8rs', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')], model='claude-3-haiku-20240307', role='assistant', stop_reason='tool_use', stop_sequence=None, type='message', usage=In: 493; Out: 83; Total: 576)"
+ "ToolsBetaMessage(id='msg_01Uv8ujpvg7wYsoSvc9hBoTd', content=[ToolUseBlock(id='toolu_01Kg1appFFPkxHMvUXewgBky', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')], model='claude-3-haiku-20240307', role='assistant', stop_reason='tool_use', stop_sequence=None, type='message', usage=In: 418; Out: 72; Total: 490)"
]
},
"execution_count": null,
@@ -1777,6 +1943,7 @@
}
],
"source": [
+ "chat = Chat(model, sp=sp, tools=[sums])\n",
"r = chat(pr)\n",
"r"
]
@@ -1789,21 +1956,24 @@
"outputs": [
{
"data": {
- "text/html": [
- "The sum of 604,542 and 6,458,932 is 7,063,474.\n",
- "- \n",
- "id: msg_01Ldm99HKbMrVqg9zhM6h7tM
\n",
- "- content: [{'text': 'The sum of 604,542 and 6,458,932 is 7,063,474.', 'type': 'text'}]
\n",
- "- model: claude-3-haiku-20240307
\n",
- "- role: assistant
\n",
- "- stop_reason: end_turn
\n",
- "- stop_sequence: None
\n",
- "- type: message
\n",
- "- usage: {'input_tokens': 590, 'output_tokens': 28}\n",
- "
"
+ "text/markdown": [
+ "The sum of 604542 and 6458932 is 7063474.\n",
+ "\n",
+ "\n",
+ "\n",
+ "id: msg_01AfTpTHU3evB2BW1JH9ufF1\n",
+ "- content: [{'text': 'The sum of 604542 and 6458932 is 7063474.', 'type': 'text'}]\n",
+ "- model: claude-3-haiku-20240307\n",
+ "- role: assistant\n",
+ "- stop_reason: end_turn\n",
+ "- stop_sequence: None\n",
+ "- type: message\n",
+ "- usage: {'input_tokens': 505, 'output_tokens': 23}\n",
+ "\n",
+ " "
],
"text/plain": [
- "ToolsBetaMessage(id='msg_01Ldm99HKbMrVqg9zhM6h7tM', content=[TextBlock(text='The sum of 604,542 and 6,458,932 is 7,063,474.', type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=In: 590; Out: 28; Total: 618)"
+ "ToolsBetaMessage(id='msg_01AfTpTHU3evB2BW1JH9ufF1', content=[TextBlock(text='The sum of 604542 and 6458932 is 7063474.', type='text')], model='claude-3-haiku-20240307', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=In: 505; Out: 23; Total: 528)"
]
},
"execution_count": null,
@@ -1820,7 +1990,7 @@
"id": "b7054a02",
"metadata": {},
"source": [
- "It should be correct, because it actually used our Python function to do the addition..."
+ "It should be correct, because it actually used our Python function to do the addition. Let's check:"
]
},
{
@@ -1846,7 +2016,23 @@
},
{
"cell_type": "markdown",
- "id": "ed39be44",
+ "id": "df1dd4c3",
+ "metadata": {},
+ "source": [
+ "## Images"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "20bdb122",
+ "metadata": {},
+ "source": [
+ "Not done yet."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "68932d15",
"metadata": {},
"source": [
"## XML helpers"
@@ -1854,7 +2040,7 @@
},
{
"cell_type": "markdown",
- "id": "db4b3f2b",
+ "id": "2489fb45",
"metadata": {},
"source": [
"TODO: Document this bit."
@@ -1863,21 +2049,21 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "c08eb47b",
+ "id": "f8dd5139",
"metadata": {},
"outputs": [],
"source": [
"#| exports\n",
"def hl_md(s, lang='xml'):\n",
" \"Syntax highlight `s` using `lang`.\"\n",
- " if Markdown: return Markdown(f'```{lang}\\n{s}\\n```')\n",
+ " if display: return display.Markdown(f'```{lang}\\n{s}\\n```')\n",
" print(s)"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "a9dfca1c",
+ "id": "6ce74be2",
"metadata": {},
"outputs": [],
"source": [
@@ -1899,7 +2085,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "545f8e5c",
+ "id": "72b8150c",
"metadata": {},
"outputs": [],
"source": [
@@ -1913,7 +2099,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "0cd36f5b",
+ "id": "84d533e7",
"metadata": {},
"outputs": [],
"source": [
@@ -1926,7 +2112,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "a2e2cdeb",
+ "id": "8f2664cc",
"metadata": {},
"outputs": [],
"source": [
@@ -1945,7 +2131,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "25692697",
+ "id": "80d644f4",
"metadata": {},
"outputs": [
{
@@ -1980,7 +2166,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "bf251158",
+ "id": "3e61fbb6",
"metadata": {},
"outputs": [],
"source": [
@@ -2002,7 +2188,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "20f3546e",
+ "id": "7d4da1ad",
"metadata": {},
"outputs": [
{
@@ -2029,22 +2215,6 @@
"print(json_to_xml(a, 'person'))"
]
},
- {
- "cell_type": "markdown",
- "id": "df1dd4c3",
- "metadata": {},
- "source": [
- "## Images"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "20bdb122",
- "metadata": {},
- "source": [
- "Not done yet."
- ]
- },
{
"cell_type": "markdown",
"id": "94ec4289",