Description

I wanted to continue a chat even after a function had raised an exception.
What I Did
```python
import chatlab

def add_two_numbers(a: float, b: float) -> float:
    """Add two numbers together. Raises an exception when the numbers are in the wrong order."""
    if b < a:
        return a + b
    raise Exception("I can't do math")

chat = chatlab.Chat(model=chatlab.models.GPT_4_0125_PREVIEW, chat_functions=[add_two_numbers])

await chat("Please add 1 + 2 for me")
```
This generates the exception:
```
---------------------------------------------------------------------------
Exception                                 Traceback (most recent call last)
Cell In[11], line 10
      7     raise Exception("I can't do math")
      9 chat = chatlab.Chat(model=chatlab.models.GPT_4_0125_PREVIEW, chat_functions=[add_two_numbers])
---> 10 await chat("Please add 1 + 2 for me")

File ~/Wharf/src/chatlab/chatlab/chat.py:125, in Chat.__call__(self, stream, *messages, **kwargs)
    123 async def __call__(self, *messages: Union[ChatCompletionMessageParam, str], stream=True, **kwargs):
    124     """Send messages to the chat model and display the response."""
--> 125     return await self.submit(*messages, stream=stream, **kwargs)

File ~/Wharf/src/chatlab/chatlab/chat.py:350, in Chat.submit(self, stream, *messages, **kwargs)
    347 self.append(assistant_tool_calls(tool_arguments))
    348 for tool_argument in tool_arguments:
    349     # Oh crap I need to append the big assistant call of it too. May have to assume we've done it by here.
--> 350     function_called = await tool_argument.call(self.function_registry)
    351     # TODO: Format the tool message
    352     self.append(function_called.get_tool_called_message())

File ~/Wharf/src/chatlab/chatlab/views/tools.py:146, in ToolArguments.call(self, function_registry)
    144 # Execute the function and get the result
    145 try:
--> 146     output = await function_registry.call(function_name, function_args)
    147 except FunctionArgumentError as e:
    148     self.finished = True

File ~/Wharf/src/chatlab/chatlab/registry.py:474, in FunctionRegistry.call(self, name, arguments)
    472     result = await function(**prepared_arguments)
    473 else:
--> 474     result = function(**prepared_arguments)
    475 return result

Cell In[11], line 7, in add_two_numbers(a, b)
      5 if b < a:
      6     return a + b
----> 7 raise Exception("I can't do math")

Exception: I can't do math
```
and all future calls to the chat generate a 400 error code from OpenAI:
```python
await chat("what went wrong there?")
```
```
---------------------------------------------------------------------------
BadRequestError                           Traceback (most recent call last)
Cell In[10], line 1
----> 1 await chat("what went wrong there?")

File ~/Wharf/src/chatlab/chatlab/chat.py:125, in Chat.__call__(self, stream, *messages, **kwargs)
    123 async def __call__(self, *messages: Union[ChatCompletionMessageParam, str], stream=True, **kwargs):
    124     """Send messages to the chat model and display the response."""
--> 125     return await self.submit(*messages, stream=stream, **kwargs)

File ~/Wharf/src/chatlab/chatlab/chat.py:302, in Chat.submit(self, stream, *messages, **kwargs)
    299 # Due to the strict response typing based on `Literal` typing on `stream`, we have to process these
    300 # two cases separately
    301 if stream:
--> 302     streaming_response = await client.chat.completions.create(
    303         **chat_create_kwargs,
    304         stream=True,
    305     )
    307 self.append(*messages)
    309 finish_reason, function_call_request, tool_arguments = await self.__process_stream(streaming_response)

File ~/.pyenv/versions/3.12.1/lib/python3.12/site-packages/openai/resources/chat/completions.py:1291, in AsyncCompletions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, logprobs, max_tokens, n, presence_penalty, response_format, seed, stop, stream, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
   1242 @required_args(["messages", "model"], ["messages", "model", "stream"])
   1243 async def create(
   1244     self,
   (...)
   1289     timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
   1290 ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
-> 1291     return await self._post(
   1292         "/chat/completions",
   1293         body=maybe_transform(
   1294             {
   1295                 "messages": messages,
   1296                 "model": model,
   1297                 "frequency_penalty": frequency_penalty,
   1298                 "function_call": function_call,
   1299                 "functions": functions,
   1300                 "logit_bias": logit_bias,
   1301                 "logprobs": logprobs,
   1302                 "max_tokens": max_tokens,
   1303                 "n": n,
   1304                 "presence_penalty": presence_penalty,
   1305                 "response_format": response_format,
   1306                 "seed": seed,
   1307                 "stop": stop,
   1308                 "stream": stream,
   1309                 "temperature": temperature,
   1310                 "tool_choice": tool_choice,
   1311                 "tools": tools,
   1312                 "top_logprobs": top_logprobs,
   1313                 "top_p": top_p,
   1314                 "user": user,
   1315             },
   1316             completion_create_params.CompletionCreateParams,
   1317         ),
   1318         options=make_request_options(
   1319             extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
   1320         ),
   1321         cast_to=ChatCompletion,
   1322         stream=stream or False,
   1323         stream_cls=AsyncStream[ChatCompletionChunk],
   1324     )

File ~/.pyenv/versions/3.12.1/lib/python3.12/site-packages/openai/_base_client.py:1578, in AsyncAPIClient.post(self, path, cast_to, body, files, options, stream, stream_cls)
   1564 async def post(
   1565     self,
   1566     path: str,
   (...)
   1573     stream_cls: type[_AsyncStreamT] | None = None,
   1574 ) -> ResponseT | _AsyncStreamT:
   1575     opts = FinalRequestOptions.construct(
   1576         method="post", url=path, json_data=body, files=await async_to_httpx_files(files), **options
   1577     )
-> 1578     return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)

File ~/.pyenv/versions/3.12.1/lib/python3.12/site-packages/openai/_base_client.py:1339, in AsyncAPIClient.request(self, cast_to, options, stream, stream_cls, remaining_retries)
   1330 async def request(
   1331     self,
   1332     cast_to: Type[ResponseT],
   (...)
   1337     remaining_retries: Optional[int] = None,
   1338 ) -> ResponseT | _AsyncStreamT:
-> 1339     return await self._request(
   1340         cast_to=cast_to,
   1341         options=options,
   1342         stream=stream,
   1343         stream_cls=stream_cls,
   1344         remaining_retries=remaining_retries,
   1345     )

File ~/.pyenv/versions/3.12.1/lib/python3.12/site-packages/openai/_base_client.py:1429, in AsyncAPIClient._request(self, cast_to, options, stream, stream_cls, remaining_retries)
   1426     await err.response.aread()
   1428 log.debug("Re-raising status error")
-> 1429 raise self._make_status_error_from_response(err.response) from None
   1431 return self._process_response(
   1432     cast_to=cast_to,
   1433     options=options,
   (...)
   1436     stream_cls=stream_cls,
   1437 )

BadRequestError: Error code: 400 - {'error': {'message': "An assistant message with 'tool_calls' must be followed by tool messages responding to each 'tool_call_id'. The following tool_call_ids did not have response messages: call_KTJddJ7ScPS972aOPs2Owwdl", 'type': 'invalid_request_error', 'param': 'messages.[2].role', 'code': None}}
```
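For context on why the 400 happens: the Chat Completions API requires that every entry in an assistant message's `tool_calls` be answered by a `role: "tool"` message carrying the matching `tool_call_id`. Because `add_two_numbers` raised, chatlab appended the assistant tool-call message but never the tool response, leaving the log in roughly this state (illustrative payload; the arguments are reconstructed from the example above):

```python
# Message log after the failed call -- the assistant's tool call is
# present, but the required tool response never got appended.
messages = [
    {"role": "user", "content": "Please add 1 + 2 for me"},
    {
        "role": "assistant",
        "tool_calls": [
            {
                "id": "call_KTJddJ7ScPS972aOPs2Owwdl",
                "type": "function",
                "function": {"name": "add_two_numbers", "arguments": '{"a": 1, "b": 2}'},
            }
        ],
    },
    # Missing piece: without a message like this one, every subsequent
    # request is rejected with the 400 above.
    # {"role": "tool", "tool_call_id": "call_KTJddJ7ScPS972aOPs2Owwdl",
    #  "content": "Exception: I can't do math"},
]
```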
Ideally, the exception result would be added to the message log, allowing the chat to continue. (And perhaps even allowing the model to attempt a fix for the exception...)
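Until chatlab does this itself, here is a minimal user-side sketch of that behavior: wrap the function so any exception comes back as the tool result instead of propagating (the wrapper below is hypothetical, not part of chatlab's API):

```python
import functools

def exceptions_as_results(fn):
    """Hypothetical wrapper: turn any exception raised by a chat function
    into a string result, so a tool response message is always produced."""
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            # The model sees the error text and can react to it,
            # rather than the conversation being wedged by a 400.
            return f"{type(e).__name__}: {e}"
    return wrapper

@exceptions_as_results
def add_two_numbers(a: float, b: float) -> float:
    """Add two numbers together. Raises an exception when the numbers are in the wrong order."""
    if b < a:
        return a + b
    raise Exception("I can't do math")
```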
The @chatlab.expose_exception_to_llm decorator fixes this, but perhaps the default should be to expose all exceptions to the LLM, and have a decorator that hides them?
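For anyone landing here, a usage sketch (assuming the decorator is applied directly to the chat function before it is registered):

```python
import chatlab

@chatlab.expose_exception_to_llm
def add_two_numbers(a: float, b: float) -> float:
    """Add two numbers together. Raises an exception when the numbers are in the wrong order."""
    if b < a:
        return a + b
    raise Exception("I can't do math")

# With the decorator, the exception text is sent back as the tool result,
# so the chat keeps going instead of failing on the next request.
chat = chatlab.Chat(model=chatlab.models.GPT_4_0125_PREVIEW, chat_functions=[add_two_numbers])
```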