-
Notifications
You must be signed in to change notification settings - Fork 58
/
Copy pathmulti_tool_calling.py
85 lines (60 loc) · 2.48 KB
/
multi_tool_calling.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI
from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
OTLPSpanExporter as HTTPSpanExporter,
)
from opentelemetry.sdk import trace as trace_sdk
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from pydantic import BaseModel
from openinference.instrumentation.llama_index import LlamaIndexInstrumentor
# Configure OpenTelemetry tracing: export spans over OTLP/HTTP to a local
# Phoenix collector, then hook LlamaIndex into that tracer pipeline.
tracer_provider = trace_sdk.TracerProvider()
phoenix_exporter = HTTPSpanExporter(endpoint="http://localhost:6006/v1/traces")
span_phoenix_processor = SimpleSpanProcessor(phoenix_exporter)
tracer_provider.add_span_processor(span_phoenix_processor)
# Instrument the application so every LlamaIndex call emits spans.
LlamaIndexInstrumentor().instrument(tracer_provider=tracer_provider)
class Song(BaseModel):
    """A song with name and artist"""

    # Song title, e.g. "Hey Jude"
    name: str
    # Performing artist, e.g. "The Beatles"
    artist: str
class SongList(BaseModel):
    """A list of song names"""

    # Plain song titles only (no artist info); see process_song_list below.
    songs: list[str]
def generate_song(name: str, artist: str) -> Song:
    """Generates a song with provided name and artist."""
    # NOTE: the docstring above doubles as the tool description shown to the
    # LLM (FunctionTool.from_defaults derives the schema from this signature).
    song = Song(name=name, artist=artist)
    return song
def process_song_list(songs: list[str]) -> SongList:
    """Processes a list of song names."""
    # NOTE: the docstring above doubles as the tool description shown to the
    # LLM; it is reproduced verbatim so the tool-calling prompt is unchanged.
    wrapped = SongList(songs=songs)
    return wrapped
# Register both functions as LLM-callable tools; FunctionTool derives each
# tool's name, description, and argument schema from the function itself.
tool = FunctionTool.from_defaults(fn=generate_song)
list_tool = FunctionTool.from_defaults(fn=process_song_list)
chat_history = [ChatMessage(role="user", content="Generate five songs from the Beatles")]
llm = OpenAI(model="gpt-4o-mini")
# First round: let the model decide whether (and which) tools to call.
resp = llm.chat_with_tools(
    [tool, list_tool],
    chat_history=chat_history,
)
# Map tool name -> tool so requested calls can be dispatched by name.
tools_by_name = {t.metadata.name: t for t in [tool, list_tool]}
# error_on_no_tool_call=False: returns an empty list instead of raising when
# the model answered directly, which cleanly skips/ends the loop below.
tool_calls = llm.get_tool_calls_from_response(resp, error_on_no_tool_call=False)
# Agentic loop: keep invoking the LLM until it stops requesting tool calls,
# feeding each tool's output back into the conversation as a "tool" message.
while tool_calls:
    # add the LLM's response to the chat history
    chat_history.append(resp.message)
    for tool_call in tool_calls:
        tool_name = tool_call.tool_name
        tool_kwargs = tool_call.tool_kwargs
        print(f"Calling {tool_name} with {tool_kwargs}")
        # Execute the requested tool with the arguments the LLM supplied.
        # NOTE(review): an unknown tool_name would raise KeyError here.
        tool_output = tools_by_name[tool_name](**tool_kwargs)
        # Record the result; tool_call_id ties this message back to the
        # specific tool call the model made.
        chat_history.append(
            ChatMessage(
                role="tool",
                content=str(tool_output),
                additional_kwargs={"tool_call_id": tool_call.tool_id},
            )
        )
    # Re-query the model with the tool results appended; loop continues
    # only if it requests further tool calls.
    resp = llm.chat_with_tools([tool, list_tool], chat_history=chat_history)
    tool_calls = llm.get_tool_calls_from_response(resp, error_on_no_tool_call=False)
# Final, tool-free answer from the model.
print(resp.message.content)