
Commit

Make it work
aymeric-roucher committed Feb 3, 2025
1 parent aafa5ae commit 9294c9f
Showing 6 changed files with 18 additions and 21 deletions.
10 changes: 3 additions & 7 deletions docs/source/en/reference/models.md
@@ -61,7 +61,7 @@ from smolagents import TransformersModel
 
 model = TransformersModel(model_id="HuggingFaceTB/SmolLM-135M-Instruct")
 
-print(model([{"role": "user", "content": "Ok!"}], stop_sequences=["great"]))
+print(model([{"role": "user", "content": [{"type": "text", "text": "Ok!"}]}], stop_sequences=["great"]))
 ```
 ```text
 >>> What a
@@ -80,9 +80,7 @@ The `HfApiModel` wraps huggingface_hub's [InferenceClient](https://huggingface.c
 
 from smolagents import HfApiModel
 
 messages = [
-    {"role": "user", "content": "Hello, how are you?"},
-    {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
-    {"role": "user", "content": "No need to help, take it easy."},
+    {"role": "user", "content": [{"type": "text", "text": "Hello, how are you?"}]}
 ]
 
 model = HfApiModel()
@@ -102,9 +100,7 @@ You can pass kwargs upon model initialization that will then be used whenever us
 
 from smolagents import LiteLLMModel
 
 messages = [
-    {"role": "user", "content": "Hello, how are you?"},
-    {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
-    {"role": "user", "content": "No need to help, take it easy."},
+    {"role": "user", "content": [{"type": "text", "text": "Hello, how are you?"}]}
 ]
 
 model = LiteLLMModel("anthropic/claude-3-5-sonnet-latest", temperature=0.2, max_tokens=10)
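Note: all three doc snippets above change in the same way — a chat message's `content` becomes a list of typed content blocks instead of a bare string. A minimal sketch of calling a model with the new format (the model choice and prompt text are illustrative, not part of this commit):

```python
from smolagents import HfApiModel

# A text-only message is now a list with a single {"type": "text"} block.
messages = [
    {"role": "user", "content": [{"type": "text", "text": "Hello, how are you?"}]}
]

model = HfApiModel()  # defaults to a hosted inference model
print(model(messages))
```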
2 changes: 1 addition & 1 deletion examples/e2b_example.py
@@ -42,7 +42,7 @@ def forward(self):
 )
 
 agent.run(
-    "Return me an image of a cat. Directly use the image provided in your state.",
+    "Calculate how much is 2+2, then return me an image of a cat. Directly use the image provided in your state.",
     additional_args={"cat_image": get_cat_image()},
 )  # Asking to directly return the image from state tests that additional_args are properly sent to server.
 
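For context, `additional_args` is how extra Python objects reach the agent at run time: as the in-line comment above says, they end up in the agent's state so generated code can use them directly. A rough, self-contained sketch under that assumption (the Pillow image is a stand-in for the example's `get_cat_image()` helper):

```python
from PIL import Image
from smolagents import CodeAgent, HfApiModel

agent = CodeAgent(tools=[], model=HfApiModel())

cat_image = Image.new("RGB", (64, 64), "orange")  # stand-in for get_cat_image()

# The image is injected into the agent's state under the key "cat_image",
# so the task can refer to it by name.
agent.run(
    "Return the image stored in your state under 'cat_image'.",
    additional_args={"cat_image": cat_image},
)
```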
File renamed without changes.
15 changes: 6 additions & 9 deletions src/smolagents/agents.py
@@ -58,6 +58,7 @@
 from .monitoring import Monitor
 from .prompts import (
     CODE_SYSTEM_PROMPT,
+    MANAGED_AGENT_PROMPT,
     PLAN_UPDATE_FINAL_PLAN_REDACTION,
     SYSTEM_PROMPT_FACTS,
     SYSTEM_PROMPT_FACTS_UPDATE,
@@ -179,7 +180,7 @@ def __init__(
         self.state = {}
         self.name = name
         self.description = description
-        self.managed_agent_prompt = managed_agent_prompt
+        self.managed_agent_prompt = managed_agent_prompt if managed_agent_prompt else MANAGED_AGENT_PROMPT
 
         self.managed_agents = {}
         if managed_agents is not None:
@@ -652,17 +653,13 @@ def replay(self, detailed: bool = False):
 
     def __call__(self, request, provide_run_summary=False, **kwargs):
         """Adds additional prompting for the managed agent, and runs it."""
-        full_task = self.managed_agent_prompt.format(name=self.name, task=request)
-        if self.additional_prompting:
-            full_task = full_task.replace("\n{additional_prompting}", self.additional_prompting).strip()
-        else:
-            full_task = full_task.replace("\n{additional_prompting}", "").strip()
-        output = self.agent.run(full_task, **kwargs)
+        full_task = self.managed_agent_prompt.format(name=self.name, task=request).strip()
+        output = self.run(full_task, **kwargs)
         if provide_run_summary:
             answer = f"Here is the final answer from your managed agent '{self.name}':\n"
             answer += str(output)
             answer += f"\n\nFor more detail, find below a summary of this agent's work:\nSUMMARY OF WORK FROM AGENT '{self.name}':\n"
-            for message in self.agent.write_memory_to_messages(summary_mode=True):
+            for message in self.write_memory_to_messages(summary_mode=True):
                 content = message["content"]
                 answer += "\n" + truncate_content(str(content)) + "\n---"
             answer += f"\nEND OF SUMMARY OF WORK FROM AGENT '{self.name}'."
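Taken with the new `MANAGED_AGENT_PROMPT` default in `__init__` above, a managed agent now formats the incoming request with its prompt template and runs itself (`self.run`) rather than delegating to an inner `self.agent`. A rough usage sketch of the manager/managed pattern this supports — the tool, model, and task are illustrative, not part of this commit:

```python
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel

model = HfApiModel()

# A managed agent only needs a name and description; with no custom
# managed_agent_prompt, the MANAGED_AGENT_PROMPT default is used.
web_agent = CodeAgent(
    tools=[DuckDuckGoSearchTool()],
    model=model,
    name="web_searcher",
    description="Searches the web and reports back findings.",
)

manager = CodeAgent(tools=[], model=model, managed_agents=[web_agent])
manager.run("Find out when the smolagents library was first released.")
```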
@@ -928,7 +925,7 @@ def step(self, memory_step: ActionStep) -> Union[None, Any]:
             ]
             observation = "Execution logs:\n" + execution_logs
         except Exception as e:
-            if "print_outputs" in self.python_executor.state:
+            if hasattr(self.python_executor, "state") and "print_outputs" in self.python_executor.state:
                 execution_logs = self.python_executor.state["print_outputs"]
                 if len(execution_logs) > 0:
                     execution_outputs_console = [
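The added `hasattr` guard keeps the error path from raising a second exception when the executor has no local `state` dict (the remote E2B executor runs code in a sandbox rather than in-process). A minimal illustration of the pattern, with hypothetical executor classes:

```python
class LocalExecutor:
    """Keeps captured prints in a local state dict."""
    def __init__(self):
        self.state = {"print_outputs": "hello\n"}


class RemoteExecutor:
    """Runs code remotely; has no local `state` attribute."""


def recover_logs(executor) -> str:
    # Check for the attribute first so executors without local state don't blow up.
    if hasattr(executor, "state") and "print_outputs" in executor.state:
        return executor.state["print_outputs"]
    return ""


print(repr(recover_logs(LocalExecutor())))   # 'hello\n'
print(repr(recover_logs(RemoteExecutor())))  # ''
```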
10 changes: 7 additions & 3 deletions src/smolagents/e2b_executor.py
@@ -45,9 +45,11 @@ def __init__(self, additional_imports: List[str], tools: List[Tool], logger):
                 """Please install 'e2b' extra to use E2BExecutor: `pip install "smolagents[e2b]"`"""
             )
 
+        self.logger.log("Initializing E2B executor, hold on...")
+
         self.custom_tools = {}
         self.final_answer = False
-        self.final_answer_pattern = re.compile(r"^final_answer\((.*)\)$")
+        self.final_answer_pattern = re.compile(r"final_answer\((.*?)\)")
         self.sbx = Sandbox()  # "qywp2ctmu2q7jzprcf4j")
         # TODO: validate installing agents package or not
         # print("Installing agents package on remote executor...")
@@ -90,7 +92,7 @@ def forward(self, *args, **kwargs):
         self.logger.log(tool_definition_execution.logs)
 
     def run_code_raise_errors(self, code: str):
-        if self.final_answer_pattern.match(code):
+        if self.final_answer_pattern.search(code) is not None:
             self.final_answer = True
         execution = self.sbx.run_code(
             code,
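The regex change is the functional fix in this file: the old anchored pattern used with `re.match` only fired when `final_answer(...)` was the entire code block, so multi-step snippets that compute something first were never flagged as final answers (hence the updated example task that adds a 2+2 step before asking for the image). The new unanchored, non-greedy pattern with `search` detects the call anywhere. A small self-contained comparison:

```python
import re

old_pattern = re.compile(r"^final_answer\((.*)\)$")
new_pattern = re.compile(r"final_answer\((.*?)\)")

code = "result = 2 + 2\nfinal_answer(result)"

print(bool(old_pattern.match(code)))         # False: anchors require the whole snippet to be the call
print(new_pattern.search(code) is not None)  # True: the call is found on the second line
```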
@@ -152,7 +154,9 @@ def __call__(self, code_action: str, additional_args: dict) -> Tuple[Any, Any]:
         ]:
             if getattr(result, attribute_name) is not None:
                 return getattr(result, attribute_name), execution_logs, self.final_answer
-        raise ValueError("No main result returned by executor!")
+        if self.final_answer:
+            raise ValueError("No main result returned by executor!")
+        return None, execution_logs, False
 
 
 __all__ = ["E2BExecutor"]
2 changes: 1 addition & 1 deletion src/smolagents/prompts.py
@@ -510,7 +510,7 @@
 Put all these in your final_answer tool, everything that you do not pass as an argument to final_answer will be lost.
 And even if your task resolution is not successful, please return as much context as possible, so that your manager can act upon this feedback.
-{{additional_prompting}}"""
+"""
 
 __all__ = [
     "USER_PROMPT_PLAN_UPDATE",
