 2. Method-style for direct tool access: `agent.tool.tool_name(param1="value")`
 """
 
+import asyncio
 import json
 import logging
 import os
 import random
 from concurrent.futures import ThreadPoolExecutor
-from typing import Any, AsyncIterator, Callable, Generator, List, Mapping, Optional, Type, TypeVar, Union, cast
+from typing import Any, AsyncGenerator, AsyncIterator, Callable, Mapping, Optional, Type, TypeVar, Union, cast
 
 from opentelemetry import trace
 from pydantic import BaseModel
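The new `asyncio` and `concurrent.futures.ThreadPoolExecutor` imports support the sync-over-async bridge used in `__call__` and `structured_output` below. A minimal standalone sketch of that pattern, assuming the caller may already be inside a running event loop (the `_work` coroutine is illustrative, not part of the SDK):

```python
import asyncio
from concurrent.futures import ThreadPoolExecutor


async def _work() -> str:
    # Stand-in for an async agent method such as invoke_async.
    return "done"


def run_sync() -> str:
    # asyncio.run() raises if the current thread already has a running event
    # loop, so the coroutine is run to completion on a dedicated worker thread.
    with ThreadPoolExecutor() as executor:
        return executor.submit(lambda: asyncio.run(_work())).result()


print(run_sync())  # -> "done"
```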
@@ -418,33 +419,43 @@ def __call__(self, prompt: str, **kwargs: Any) -> AgentResult:
             - metrics: Performance metrics from the event loop
             - state: The final state of the event loop
         """
-        callback_handler = kwargs.get("callback_handler", self.callback_handler)
-        self._start_agent_trace_span(prompt)
+        def execute() -> AgentResult:
+            return asyncio.run(self.invoke_async(prompt, **kwargs))
-        try:
-            events = self._run_loop(prompt, kwargs)
-            for event in events:
-                if "callback" in event:
-                    callback_handler(**event["callback"])
+        with ThreadPoolExecutor() as executor:
+            future = executor.submit(execute)
+            return future.result()
-            stop_reason, message, metrics, state = event["stop"]
-            result = AgentResult(stop_reason, message, metrics, state)
+    async def invoke_async(self, prompt: str, **kwargs: Any) -> AgentResult:
+        """Process a natural language prompt through the agent's event loop.
-            self._end_agent_trace_span(response=result)
+        This method implements the conversational interface (e.g., `agent("hello!")`). It adds the user's prompt to
+        the conversation history, processes it through the model, executes any tool calls, and returns the final result.
-            return result
+        Args:
+            prompt: The natural language prompt from the user.
+            **kwargs: Additional parameters to pass through the event loop.
-        except Exception as e:
-            self._end_agent_trace_span(error=e)
-            raise
+        Returns:
+            Result object containing:
+
+            - stop_reason: Why the event loop stopped (e.g., "end_turn", "max_tokens")
+            - message: The final message from the model
+            - metrics: Performance metrics from the event loop
+            - state: The final state of the event loop
+        """
+        events = self.stream_async(prompt, **kwargs)
+        async for event in events:
+            _ = event
+
+        return cast(AgentResult, event["result"])
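A hedged usage sketch contrasting the two entry points; the constructor call and prompts are illustrative, and only `__call__` and `invoke_async` come from this change:

```python
import asyncio

from strands import Agent  # import path assumed

agent = Agent()  # constructor arguments omitted for brevity

# Synchronous call: __call__ runs invoke_async to completion on a worker thread.
result = agent("What tools do you have access to?")
print(result.stop_reason)


# Asynchronous call: await invoke_async directly from an existing event loop.
async def main() -> None:
    result = await agent.invoke_async("What tools do you have access to?")
    print(result.message)


asyncio.run(main())
```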
 
     def structured_output(self, output_model: Type[T], prompt: Optional[str] = None) -> T:
         """This method allows you to get structured output from the agent.
 
         If you pass in a prompt, it will be added to the conversation history and the agent will respond to it.
         If you don't pass in a prompt, it will use only the conversation history to respond.
-        If no conversation history exists and no prompt is provided, an error will be raised.
 
         For smaller models, you may want to use the optional prompt string to add additional instructions to explicitly
         instruct the model to output the structured data.
@@ -453,25 +464,52 @@ def structured_output(self, output_model: Type[T], prompt: Optional[str] = None)
             output_model: The output model (a JSON schema written as a Pydantic BaseModel)
                 that the agent will use when responding.
             prompt: The prompt to use for the agent.
+
+        Raises:
+            ValueError: If no conversation history or prompt is provided.
+        """
+
+        def execute() -> T:
+            return asyncio.run(self.structured_output_async(output_model, prompt))
+
+        with ThreadPoolExecutor() as executor:
+            future = executor.submit(execute)
+            return future.result()
+
+    async def structured_output_async(self, output_model: Type[T], prompt: Optional[str] = None) -> T:
+        """This method allows you to get structured output from the agent.
+
+        If you pass in a prompt, it will be added to the conversation history and the agent will respond to it.
+        If you don't pass in a prompt, it will use only the conversation history to respond.
+
+        For smaller models, you may want to use the optional prompt string to add additional instructions to explicitly
+        instruct the model to output the structured data.
+
+        Args:
+            output_model: The output model (a JSON schema written as a Pydantic BaseModel)
+                that the agent will use when responding.
+            prompt: The prompt to use for the agent.
+
+        Raises:
+            ValueError: If no conversation history or prompt is provided.
         """
         self._hooks.invoke_callbacks(StartRequestEvent(agent=self))
 
         try:
-            messages = self.messages
-            if not messages and not prompt:
+            if not self.messages and not prompt:
                 raise ValueError("No conversation history or prompt provided")
 
             # add the prompt as the last message
             if prompt:
-                messages.append({"role": "user", "content": [{"text": prompt}]})
+                self.messages.append({"role": "user", "content": [{"text": prompt}]})
 
-            # get the structured output from the model
-            events = self.model.structured_output(output_model, messages)
-            for event in events:
+            events = self.model.structured_output(output_model, self.messages)
+            async for event in events:
                 if "callback" in event:
                     self.callback_handler(**cast(dict, event["callback"]))
 
             return event["output"]
+
         finally:
             self._hooks.invoke_callbacks(EndRequestEvent(agent=self))
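A hedged sketch of how the new split reads from the caller's side; the `Weather` model and constructor call are illustrative, while the `structured_output` / `structured_output_async` pairing is from this change:

```python
import asyncio

from pydantic import BaseModel

from strands import Agent  # import path assumed


class Weather(BaseModel):
    """Facts to extract from the reply."""

    city: str
    temperature_c: float


agent = Agent()  # model/tool configuration omitted

# Blocking variant: wraps structured_output_async via asyncio.run on a worker thread.
report = agent.structured_output(Weather, "It is 21 degrees Celsius in Madrid.")


# Non-blocking variant for callers that already run an event loop.
async def main() -> None:
    report = await agent.structured_output_async(Weather, "It is 21 degrees Celsius in Madrid.")
    print(report.city, report.temperature_c)


asyncio.run(main())
```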
@@ -511,21 +549,22 @@ async def stream_async(self, prompt: str, **kwargs: Any) -> AsyncIterator[Any]:
         try:
             events = self._run_loop(prompt, kwargs)
-            for event in events:
+            async for event in events:
                 if "callback" in event:
                     callback_handler(**event["callback"])
                     yield event["callback"]
 
-            stop_reason, message, metrics, state = event["stop"]
-            result = AgentResult(stop_reason, message, metrics, state)
+            result = AgentResult(*event["stop"])
+            callback_handler(result=result)
+            yield {"result": result}
 
             self._end_agent_trace_span(response=result)
         except Exception as e:
             self._end_agent_trace_span(error=e)
             raise
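With this change the stream now terminates with a `{"result": AgentResult}` event, which is what `invoke_async` above relies on. A hedged consumption sketch (agent construction and the prompt are illustrative):

```python
import asyncio

from strands import Agent  # import path assumed


async def main() -> None:
    agent = Agent()  # constructor arguments omitted

    final_result = None
    async for event in agent.stream_async("Summarize our conversation."):
        if "result" in event:
            # Terminal event introduced by this commit; carries the AgentResult.
            final_result = event["result"]
        else:
            # Intermediate callback events (text deltas, tool activity, ...).
            print(event)

    print(final_result)


asyncio.run(main())
```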
 
-    def _run_loop(self, prompt: str, kwargs: dict[str, Any]) -> Generator[dict[str, Any], None, None]:
+    async def _run_loop(self, prompt: str, kwargs: dict[str, Any]) -> AsyncGenerator[dict[str, Any], None]:
         """Execute the agent's event loop with the given prompt and parameters."""
         self._hooks.invoke_callbacks(StartRequestEvent(agent=self))
@@ -539,13 +578,15 @@ def _run_loop(self, prompt: str, kwargs: dict[str, Any]) -> Generator[dict[str,
             self.messages.append(new_message)
 
             # Execute the event loop cycle with retry logic for context limits
-            yield from self._execute_event_loop_cycle(kwargs)
+            events = self._execute_event_loop_cycle(kwargs)
+            async for event in events:
+                yield event
 
         finally:
             self.conversation_manager.apply_management(self)
             self._hooks.invoke_callbacks(EndRequestEvent(agent=self))
 
-    def _execute_event_loop_cycle(self, kwargs: dict[str, Any]) -> Generator[dict[str, Any], None, None]:
+    async def _execute_event_loop_cycle(self, kwargs: dict[str, Any]) -> AsyncGenerator[dict[str, Any], None]:
         """Execute the event loop cycle with retry logic for context window limits.
 
         This internal method handles the execution of the event loop cycle and implements
@@ -583,7 +624,7 @@ def _execute_event_loop_cycle(self, kwargs: dict[str, Any]) -> Generator[dict[st
         try:
             # Execute the main event loop cycle
-            yield from event_loop_cycle(
+            events = event_loop_cycle(
                 model=self.model,
                 system_prompt=self.system_prompt,
                 messages=self.messages,  # will be modified by event_loop_cycle
@@ -594,11 +635,15 @@ def _execute_event_loop_cycle(self, kwargs: dict[str, Any]) -> Generator[dict[st
                 event_loop_parent_span=self.trace_span,
                 kwargs=kwargs,
             )
+            async for event in events:
+                yield event
 
         except ContextWindowOverflowException as e:
             # Try reducing the context size and retrying
             self.conversation_manager.reduce_context(self, e=e)
-            yield from self._execute_event_loop_cycle(kwargs)
+            events = self._execute_event_loop_cycle(kwargs)
+            async for event in events:
+                yield event
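The repeated `async for ... yield` blocks stand in for `yield from`, which is a SyntaxError inside an async generator; forwarding each event explicitly is the usual delegation pattern. A minimal illustration with throwaway names:

```python
import asyncio
from typing import AsyncGenerator


async def inner() -> AsyncGenerator[int, None]:
    for i in range(3):
        yield i


async def outer() -> AsyncGenerator[int, None]:
    # "yield from inner()" would not compile here, so events are re-yielded.
    async for item in inner():
        yield item


async def main() -> None:
    print([item async for item in outer()])  # [0, 1, 2]


asyncio.run(main())
```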
 
     def _record_tool_execution(
         self,
@@ -623,7 +668,7 @@ def _record_tool_execution(
             messages: The message history to append to.
         """
         # Create user message describing the tool call
-        user_msg_content: List[ContentBlock] = [
+        user_msg_content: list[ContentBlock] = [
             {"text": (f"agent.tool.{tool['name']} direct tool call.\nInput parameters: {json.dumps(tool['input'])}\n")}
         ]
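For context, `_record_tool_execution` is what makes method-style direct tool calls (see the module docstring above) show up in `agent.messages`. A hedged sketch of the caller's view; the `calculator` tool and its `expression` parameter are hypothetical:

```python
from strands import Agent  # import path assumed

agent = Agent()  # tool and model configuration omitted

# Direct tool access; _record_tool_execution appends a user message such as
# "agent.tool.calculator direct tool call.\nInput parameters: {...}" plus the
# tool result to the conversation history.
result = agent.tool.calculator(expression="2 + 2")  # hypothetical tool
```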