
Node names and other stuff #38

Merged: 12 commits, Oct 2, 2024
README.md: 15 additions & 25 deletions
@@ -43,38 +43,28 @@ make setup
 
 The simplest agent just to show the basic structure of the agent:
 ```python
-
-from tapeagents.agent import Agent
+from tapeagents.agent import Agent, Node
 from tapeagents.core import Prompt
-from tapeagents.dialog_tape import AssistantStep, DialogTape, SystemStep, UserStep
+from tapeagents.dialog_tape import AssistantStep, UserStep, DialogTape
 from tapeagents.llms import LLMStream, LiteLLM
+from tapeagents.prompting import tape_to_messages
+
+llm = LiteLLM(model_name="gpt-4o-mini")
 
 
-class MyFirstAgent(Agent[DialogTape]):
-    def make_prompt(self, tape: DialogTape) -> Prompt:
-        """
-        Render tape into the prompt, each step is converted into a message
-        """
-        return Prompt(messages=tape.as_prompt_messages())
+class MainNode(Node):
+    def make_prompt(self, agent: Agent, tape: DialogTape) -> Prompt:
+        # Render the whole tape into the prompt, each step is converted to message
+        return Prompt(messages=tape_to_messages(tape))
 
-    def generate_steps(self, tape: DialogTape, llm_stream: LLMStream):
-        """
-        Generate single tape step from the LLM output messages stream.
-        """
+    def generate_steps(self, agent: Agent, tape: DialogTape, llm_stream: LLMStream):
+        # Generate single tape step from the LLM output messages stream.
         yield AssistantStep(content=llm_stream.get_text())
 
-llm = LiteLLM(model_name="gpt-4o-mini-2024-07-18")
-agent = MyFirstAgent.create(llm)
-
-# Tape is a sequence of steps that contains all the interactions between the user and the agent happened during the session.
-# Let's provide the agent with the description of the task and the first step to start the conversation.
-start_tape = DialogTape(
-    steps=[
-        SystemStep(content="Respond to the user using the style of Shakespeare books. Be very brief, 50 words max."),
-        UserStep(content="Hello, tell me about Montreal."),
-    ],
-)
-final_tape = agent.run(start_tape).get_final_tape()
+agent = Agent[DialogTape].create(llm, nodes=[MainNode()])
+start_tape = DialogTape(steps=[UserStep(content="Tell me about Montreal in 3 sentences")])
+final_tape = agent.run(start_tape).get_final_tape()  # agent will start executing the first node
+print(f"Final tape: {final_tape.model_dump_json(indent=2)}")
 ```
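The PR removes the README's system-prompt example. For reference, here is a minimal sketch of what that removed example could look like under the new node-based API. It reuses only names that appear in this diff and assumes `SystemStep` still combines with `DialogTape` the way the old snippet showed; the prompt strings are illustrative.

```python
# Sketch: the removed SystemStep example rewritten against the new node-based API.
# Assumes SystemStep behaves as in the old README snippet; all other names come from this diff.
from tapeagents.agent import Agent, Node
from tapeagents.core import Prompt
from tapeagents.dialog_tape import AssistantStep, DialogTape, SystemStep, UserStep
from tapeagents.llms import LiteLLM, LLMStream
from tapeagents.prompting import tape_to_messages

llm = LiteLLM(model_name="gpt-4o-mini")


class MainNode(Node):
    def make_prompt(self, agent: Agent, tape: DialogTape) -> Prompt:
        # Render the whole tape into the prompt, one message per step
        return Prompt(messages=tape_to_messages(tape))

    def generate_steps(self, agent: Agent, tape: DialogTape, llm_stream: LLMStream):
        # Emit a single assistant step built from the streamed LLM output
        yield AssistantStep(content=llm_stream.get_text())


agent = Agent[DialogTape].create(llm, nodes=[MainNode()])
start_tape = DialogTape(
    steps=[
        SystemStep(content="Respond in the style of Shakespeare. Be very brief, 50 words max."),
        UserStep(content="Hello, tell me about Montreal."),
    ],
)
final_tape = agent.run(start_tape).get_final_tape()
print(final_tape.model_dump_json(indent=2))
```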

example.env: 0 additions & 8 deletions

This file was deleted.

examples/agent.py: 8 additions & 11 deletions
@@ -1,9 +1,10 @@
 import sys
 
 from tapeagents.agent import Agent, Node
-from tapeagents.core import SetNextNode, Prompt, Tape
+from tapeagents.core import Prompt, SetNextNode, Tape
 from tapeagents.dialog_tape import AssistantStep, AssistantThought, DialogTape, UserStep
-from tapeagents.llms import TrainableLLM, LLM, LLMStream
+from tapeagents.llms import LLM, LLMStream, TrainableLLM
+from tapeagents.prompting import prompt_with_guidance, tape_to_messages
 
 
 def hello_world(llm: LLM):
@@ -50,7 +51,9 @@ def router(agent: Agent, tape: Tape, llm_stream: LLMStream):
         nodes=[
             Node(name="router").with_generate_steps(router),
             Node(name="go_left").with_fixed_steps([AssistantStep(content="You went left!"), SetNextNode(next_node=0)]),
-            Node(name="go_right").with_fixed_steps([AssistantStep(content="You went right!"), SetNextNode(next_node=0)]),
+            Node(name="go_right").with_fixed_steps(
+                [AssistantStep(content="You went right!"), SetNextNode(next_node=0)]
+            ),
             Node(name="something_else").with_fixed_steps(
                 [AssistantStep(content="What do you mean?"), SetNextNode(next_node=0)]
             ),
@@ -67,21 +70,15 @@ def router(agent: Agent, tape: Tape, llm_stream: LLMStream):
 
 def classy_hello_world(llm: LLM):
     class ThinkingNode(Node):
         name: str = "think"
 
         def make_prompt(self, agent, tape: Tape) -> Prompt:
-            messages = tape.steps + [UserStep(content="Describe how Shakespeare would say hello world")]
-            return Prompt(messages=[m.llm_dict() for m in messages])
+            return prompt_with_guidance(tape, "Describe how Shakespeare would say hello world")
 
         def generate_steps(self, agent, tape: Tape, llm_stream: LLMStream):
             yield AssistantThought(content=llm_stream.get_text())
 
     class RespondingNode(Node):
         name: str = "respond"
 
         def make_prompt(self, agent, tape: Tape) -> Prompt:
-            messages = tape.steps + [UserStep(content="Respond with the hello world in the described style")]
-            return Prompt(messages=[m.llm_dict() for m in messages])
+            return prompt_with_guidance(tape, "Respond with the hello world in the described style")
 
         def generate_steps(self, agent, tape: Tape, llm_stream: LLMStream):
             yield AssistantStep(content=llm_stream.get_text())
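The two inline `make_prompt` bodies above are collapsed into `prompt_with_guidance`. Its implementation is not shown in this diff; judging purely from the lines it replaces, it presumably appends the guidance as a trailing `UserStep` and renders all tape steps as LLM messages, roughly as in the sketch below. Check `tapeagents/prompting.py` for the actual signature and behavior.

```python
# Illustrative sketch only, reconstructed from the removed lines above;
# not the library's actual implementation.
from tapeagents.core import Prompt, Tape
from tapeagents.dialog_tape import UserStep


def prompt_with_guidance(tape: Tape, guidance: str) -> Prompt:
    # Append the guidance as a final user message after all existing tape steps
    messages = tape.steps + [UserStep(content=guidance)]
    return Prompt(messages=[m.llm_dict() for m in messages])
```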
examples/delegate_stack.py: 22 additions & 24 deletions
@@ -6,15 +6,15 @@
 from pydantic import Field
 
 from tapeagents.agent import Agent
-from tapeagents.chain import Chain
+from tapeagents.chain import CallSubagent, Chain
 from tapeagents.core import (
     Action,
     FinalStep,
     Prompt,
     Tape,
     Thought,
 )
-from tapeagents.llms import TrainableLLM, LLM, LLMStream
+from tapeagents.llms import LLM, LLMStream, TrainableLLM
 from tapeagents.view import Call, Respond, TapeViewStack
 
 EXAMPLE_TEXT = """I am a text with some verbs like running, jumping, and swimming."""
@@ -142,20 +142,19 @@ class Linguist(Chain[ExampleTape]):
     def create(cls, llm: LLM):  # type: ignore
         return super().create(
             llms=llm,
-            subagents_with_inputs=[
-                (FindNouns.create(llms=llm, templates=FIND_NOUNS_MESSAGE), ()),
-                (
-                    Chain.create(
+            nodes=[
+                CallSubagent(agent=FindNouns.create(llms=llm, templates=FIND_NOUNS_MESSAGE)),
+                CallSubagent(
+                    agent=Chain.create(
                         name="FindIrregularVerbs",
-                        subagents_with_inputs=[
-                            (FindVerbs.create(llm, templates=FIND_VERBS_MESSAGE), ()),
-                            (
-                                FilterIrregular.create(llm, templates=FILTER_IRREGULAR_MESSAGE),
-                                (-1,),
+                        nodes=[
+                            CallSubagent(agent=FindVerbs.create(llm, templates=FIND_VERBS_MESSAGE)),
+                            CallSubagent(
+                                agent=FilterIrregular.create(llm, templates=FILTER_IRREGULAR_MESSAGE),
+                                inputs=(-1,),
                             ),
                         ],
                     ),
-                    (),
                 ),
             ],
             templates=PRESENT_RESULTS_MESSAGE,
@@ -206,24 +205,23 @@ def make_analyze_text_chain(llm: LLM):
     """
     return Chain.create(
         name="Linguist",
-        subagents_with_inputs=[
-            (FindNouns.create(llms=llm, templates=FIND_NOUNS_MESSAGE), ()),
-            (
-                Chain.create(
+        nodes=[
+            CallSubagent(agent=FindNouns.create(llms=llm, templates=FIND_NOUNS_MESSAGE)),
+            CallSubagent(
+                agent=Chain.create(
                     name="FindIrregularVerbs",
-                    subagents_with_inputs=[
-                        (FindVerbs.create(llm, templates=FIND_VERBS_MESSAGE), ()),
-                        (
-                            FilterIrregular.create(llm, templates=FILTER_IRREGULAR_MESSAGE),
-                            (-1,),
+                    nodes=[
+                        CallSubagent(agent=FindVerbs.create(llm, templates=FIND_VERBS_MESSAGE)),
+                        CallSubagent(
+                            agent=FilterIrregular.create(llm, templates=FILTER_IRREGULAR_MESSAGE),
+                            inputs=(-1,),
                         ),
                     ],
                 ),
-                (),
             ),
-            (PresentAnalysis.create(llm, templates=PRESENT_RESULTS_MESSAGE), (-2, -1)),
+            CallSubagent(agent=PresentAnalysis.create(llm, templates=PRESENT_RESULTS_MESSAGE), inputs=(-2, -1)),
         ],
-    )  # type: ignore
+    )
 
 
 def main():
examples/first.py: 0 additions & 33 deletions

This file was deleted.

examples/hello_world.py: 23 additions & 0 deletions (new file)
@@ -0,0 +1,23 @@
+from tapeagents.agent import Agent, Node
+from tapeagents.core import Prompt
+from tapeagents.dialog_tape import AssistantStep, DialogTape, UserStep
+from tapeagents.llms import LiteLLM, LLMStream
+from tapeagents.prompting import tape_to_messages
+
+llm = LiteLLM(model_name="gpt-4o-mini")
+
+
+class MainNode(Node):
+    def make_prompt(self, agent: Agent, tape: DialogTape) -> Prompt:
+        # Render the whole tape into the prompt, each step is converted to message
+        return Prompt(messages=tape_to_messages(tape))
+
+    def generate_steps(self, agent: Agent, tape: DialogTape, llm_stream: LLMStream):
+        # Generate single tape step from the LLM output messages stream.
+        yield AssistantStep(content=llm_stream.get_text())
+
+
+agent = Agent[DialogTape].create(llm, nodes=[MainNode()])
+start_tape = DialogTape(steps=[UserStep(content="Tell me about Montreal in 3 sentences")])
+final_tape = agent.run(start_tape).get_final_tape()  # agent will start executing the first node
+print(f"Final tape: {final_tape.model_dump_json(indent=2)}")