Commit v2.5.5

ashpreetbedi committed Oct 17, 2024
1 parent 87388fb commit 05fbef1
Showing 11 changed files with 228 additions and 32 deletions.
1 change: 1 addition & 0 deletions cookbook/agents_101/.gitignore
@@ -0,0 +1 @@
tmp
60 changes: 60 additions & 0 deletions cookbook/agents_101/README.md
@@ -0,0 +1,60 @@
# Agents 101

> Note: Fork and clone this repository if needed.

### 1. Create and activate a virtual environment

```shell
python3 -m venv ~/.venvs/aienv
source ~/.venvs/aienv/bin/activate
```

### 2. Export your `OPENAI_API_KEY`

```shell
export OPENAI_API_KEY=***
```

### 3. Install libraries

```shell
pip install -U openai duckduckgo-search duckdb yfinance lancedb tantivy pypdf sqlalchemy 'fastapi[standard]' phidata
```

### 4. Web Search Agent

```shell
python cookbook/agents_101/web_search.py
```

### 5. Web Reader Agent

```shell
python cookbook/agents_101/web_reader.py
```

### 6. Finance Agent

```shell
python cookbook/agents_101/finance_agent.py
```

### 7. RAG Agent

```shell
python cookbook/agents_101/rag_agent.py
```

### 8. Playground

Authenticate with phidata.app

```shell
phi auth
```

Run the playground

```shell
python cookbook/agents_101/playground.py
```
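
The Web Reader step in the README runs `cookbook/agents_101/web_reader.py`, which is not among the files shown in this diff. Below is a minimal sketch of what such a script could look like, assuming it follows the same pattern as the other agents in this cookbook and uses the `JinaReaderTools` toolkit updated later in this commit; the prompt and URL are illustrative only.

```python
# Hypothetical sketch of cookbook/agents_101/web_reader.py (not part of this commit).
from phi.agent import Agent
from phi.model.openai import OpenAIChat
from phi.tools.jina_tools import JinaReaderTools

web_reader = Agent(
    name="Web Reader",
    role="Read and summarize web pages",
    model=OpenAIChat(id="gpt-4o"),
    tools=[JinaReaderTools()],  # read_url is registered by default
    markdown=True,
    show_tool_calls=True,
)
web_reader.print_response("Summarize https://docs.phidata.com", stream=True)
```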
Empty file added cookbook/agents_101/__init__.py
Empty file.
16 changes: 16 additions & 0 deletions cookbook/agents_101/finance_agent.py
@@ -0,0 +1,16 @@
"""Run `pip install yfinance` to install dependencies."""

from phi.agent import Agent
from phi.model.openai import OpenAIChat
from phi.tools.yfinance import YFinanceTools

finance_agent = Agent(
name="Finance Agent",
role="Get financial data",
model=OpenAIChat(id="gpt-4o"),
tools=[YFinanceTools(stock_price=True, analyst_recommendations=True, company_info=True, company_news=True)],
instructions=["Always use tables to display data"],
markdown=True,
show_tool_calls=True,
)
finance_agent.print_response("Share analyst recommendations for NVDA and provide a recommendation", stream=True)
41 changes: 41 additions & 0 deletions cookbook/agents_101/playground.py
@@ -0,0 +1,41 @@
from phi.agent import Agent
from phi.model.openai import OpenAIChat
from phi.tools.duckduckgo import DuckDuckGo
from phi.tools.yfinance import YFinanceTools
from phi.storage.agent.sqlite import SqlAgentStorage
from phi.playground import Playground, serve_playground_app

web_agent = Agent(
name="Web Agent",
agent_id="web_agent",
role="Search the web for information",
model=OpenAIChat(id="gpt-4o"),
tools=[DuckDuckGo()],
instructions=["Always include sources"],
storage=SqlAgentStorage(table_name="web_agent_sessions", db_file="tmp/agents.db"),
markdown=True,
)

finance_agent = Agent(
name="Finance Agent",
agent_id="finance_agent",
role="Get financial data",
model=OpenAIChat(id="gpt-4o"),
tools=[YFinanceTools(stock_price=True, analyst_recommendations=True, company_info=True, company_news=True)],
instructions=["Always use tables to display data"],
storage=SqlAgentStorage(table_name="finance_agent_sessions", db_file="tmp/agents.db"),
markdown=True,
)

agent_team = Agent(
name="Agent Team",
agent_id="agent_team",
team=[web_agent, finance_agent],
storage=SqlAgentStorage(table_name="agent_team_sessions", db_file="tmp/agents.db"),
markdown=True,
)

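# Combine the agents into a single Playground app; serve_playground_app runs it locally below.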
app = Playground(agents=[finance_agent, web_agent, agent_team]).get_app()

if __name__ == "__main__":
    serve_playground_app("playground:app", reload=True)
25 changes: 25 additions & 0 deletions cookbook/agents_101/rag_agent.py
@@ -0,0 +1,25 @@
"""Run `pip install openai lancedb tantivy` to install dependencies."""

from phi.agent import Agent
from phi.model.openai import OpenAIChat
from phi.knowledge.pdf import PDFUrlKnowledgeBase
from phi.vectordb.lancedb import LanceDb, SearchType

db_uri = "tmp/lancedb"
knowledge_base = PDFUrlKnowledgeBase(
urls=["https://phi-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf"],
vector_db=LanceDb(table_name="recipes", uri=db_uri, search_type=SearchType.vector),
)
# Load the knowledge base: Comment out after first run
knowledge_base.load(upsert=True)

agent = Agent(
    model=OpenAIChat(id="gpt-4o"),
    knowledge=knowledge_base,
    # Add a tool to read chat history.
    read_chat_history=True,
    show_tool_calls=True,
    markdown=True,
    # debug_mode=True,
)
agent.print_response("How do I make chicken and galangal in coconut milk soup", stream=True)
13 changes: 13 additions & 0 deletions cookbook/agents_101/sqlite_storage.py
@@ -0,0 +1,13 @@
"""Run `pip install duckduckgo-search sqlalchemy openai` to install dependencies."""

from phi.agent import Agent
from phi.tools.duckduckgo import DuckDuckGo
from phi.storage.agent.sqlite import SqlAgentStorage

agent = Agent(
    storage=SqlAgentStorage(table_name="agent_runs", db_file="tmp/data.db"),
    tools=[DuckDuckGo()],
    add_history_to_messages=True,
)
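# With add_history_to_messages=True, chat history is sent with each request,
# so the second question can resolve "their" from the first answer.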
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
17 changes: 17 additions & 0 deletions cookbook/agents_101/web_search.py
@@ -0,0 +1,17 @@
"""Run `pip install openai duckduckgo-search` to install dependencies."""

from phi.agent import Agent
from phi.model.openai import OpenAIChat
from phi.tools.duckduckgo import DuckDuckGo

web_agent = Agent(
name="Web Agent",
role="Search the web for information",
model=OpenAIChat(id="gpt-4o"),
tools=[DuckDuckGo()],
instructions=["Always include sources"],
markdown=True,
show_tool_calls=True,
add_datetime_to_instructions=True,
)
web_agent.print_response("Write a report on the US election", stream=True)
32 changes: 18 additions & 14 deletions phi/agent/agent.py
@@ -2568,7 +2568,7 @@ def print_response(
_response_content += resp.content
if resp.extra_data is not None and resp.extra_data.reasoning_steps is not None:
reasoning_steps = resp.extra_data.reasoning_steps
response_content = Markdown(_response_content) if self.markdown else _response_content
response_content_stream = Markdown(_response_content) if self.markdown else _response_content

panels = [status]

@@ -2614,7 +2614,7 @@ def print_response(
render = True
# Create panel for response
response_panel = self.create_panel(
content=response_content,
content=response_content_stream,
title=f"Response ({response_timer.elapsed:.1f}s)",
border_style="blue",
)
@@ -2669,28 +2669,30 @@ def print_response(
)
panels.append(reasoning_panel)

response_content = ""
response_content_batch: Union[str, JSON, Markdown] = ""
if isinstance(run_response, RunResponse):
if isinstance(run_response.content, str):
response_content = (
response_content_batch = (
Markdown(run_response.content)
if self.markdown
else run_response.get_content_as_string(indent=4)
)
elif self.response_model is not None and isinstance(run_response.content, BaseModel):
try:
response_content = JSON(run_response.content.model_dump_json(exclude_none=True), indent=2)
response_content_batch = JSON(
run_response.content.model_dump_json(exclude_none=True), indent=2
)
except Exception as e:
logger.warning(f"Failed to convert response to JSON: {e}")
else:
try:
response_content = JSON(json.dumps(run_response.content), indent=4)
response_content_batch = JSON(json.dumps(run_response.content), indent=4)
except Exception as e:
logger.warning(f"Failed to convert response to JSON: {e}")

# Create panel for response
response_panel = self.create_panel(
content=response_content,
content=response_content_batch,
title=f"Response ({response_timer.elapsed:.1f}s)",
border_style="blue",
)
@@ -2739,7 +2741,7 @@ async def aprint_response(
_response_content += resp.content
if resp.extra_data is not None and resp.extra_data.reasoning_steps is not None:
reasoning_steps = resp.extra_data.reasoning_steps
response_content = Markdown(_response_content) if self.markdown else _response_content
response_content_stream = Markdown(_response_content) if self.markdown else _response_content

panels = [status]

@@ -2785,7 +2787,7 @@ async def aprint_response(
render = True
# Create panel for response
response_panel = self.create_panel(
content=response_content,
content=response_content_stream,
title=f"Response ({response_timer.elapsed:.1f}s)",
border_style="blue",
)
@@ -2840,28 +2842,30 @@ async def aprint_response(
)
panels.append(reasoning_panel)

response_content = ""
response_content_batch: Union[str, JSON, Markdown] = ""
if isinstance(run_response, RunResponse):
if isinstance(run_response.content, str):
response_content = (
response_content_batch = (
Markdown(run_response.content)
if self.markdown
else run_response.get_content_as_string(indent=4)
)
elif self.response_model is not None and isinstance(run_response.content, BaseModel):
try:
response_content = JSON(run_response.content.model_dump_json(exclude_none=True), indent=2)
response_content_batch = JSON(
run_response.content.model_dump_json(exclude_none=True), indent=2
)
except Exception as e:
logger.warning(f"Failed to convert response to JSON: {e}")
else:
try:
response_content = JSON(json.dumps(run_response.content), indent=4)
response_content_batch = JSON(json.dumps(run_response.content), indent=4)
except Exception as e:
logger.warning(f"Failed to convert response to JSON: {e}")

# Create panel for response
response_panel = self.create_panel(
content=response_content,
content=response_content_batch,
title=f"Response ({response_timer.elapsed:.1f}s)",
border_style="blue",
)
53 changes: 36 additions & 17 deletions phi/tools/jina_tools.py
@@ -1,5 +1,7 @@
from typing import Optional, Dict
import httpx
from os import getenv
from typing import Optional, Dict

from pydantic import BaseModel, HttpUrl, Field
from phi.tools import Toolkit
from phi.utils.log import logger
@@ -9,24 +11,39 @@ class JinaReaderToolsConfig(BaseModel):
api_key: Optional[str] = Field(None, description="API key for Jina Reader")
base_url: HttpUrl = Field("https://r.jina.ai/", description="Base URL for Jina Reader API") # type: ignore
search_url: HttpUrl = Field("https://s.jina.ai/", description="Search URL for Jina Reader API") # type: ignore
max_content_length: int = Field(4000, description="Maximum content length in characters")
max_content_length: int = Field(10000, description="Maximum content length in characters")
timeout: Optional[int] = Field(None, description="Timeout for Jina Reader API requests")


class JinaReaderTools(Toolkit):
def __init__(self, api_key: Optional[str] = None, max_content_length: int = 4000):
def __init__(
self,
api_key: Optional[str] = getenv("JINA_API_KEY"),
base_url: str = "https://r.jina.ai/",
search_url: str = "https://s.jina.ai/",
max_content_length: int = 10000,
timeout: Optional[int] = None,
read_url: bool = True,
search_query: bool = False,
):
super().__init__(name="jina_reader_tools")
config = JinaReaderToolsConfig(api_key=api_key, max_content_length=max_content_length)
self.api_key = config.api_key
self.base_url = config.base_url
self.search_url = config.search_url
self.max_content_length = config.max_content_length

self.register(self.read_url)
self.register(self.search_query)
self.config: JinaReaderToolsConfig = JinaReaderToolsConfig(
api_key=api_key,
base_url=base_url,
search_url=search_url,
max_content_length=max_content_length,
timeout=timeout,
)

if read_url:
self.register(self.read_url)
if search_query:
self.register(self.search_query)

def read_url(self, url: str) -> str:
"""Reads a URL and returns the truncated content using Jina Reader API."""
full_url = f"{self.base_url}{url}"
full_url = f"{self.config.base_url}{url}"
logger.info(f"Reading URL: {full_url}")
try:
response = httpx.get(full_url, headers=self._get_headers())
@@ -40,7 +57,7 @@ def read_url(self, url: str) -> str:

def search_query(self, query: str) -> str:
"""Performs a web search using Jina Reader API and returns the truncated results."""
full_url = f"{self.search_url}{query}"
full_url = f"{self.config.search_url}{query}"
logger.info(f"Performing search: {full_url}")
try:
response = httpx.get(full_url, headers=self._get_headers())
@@ -55,17 +72,19 @@ def search_query(self, query: str) -> str:
def _get_headers(self) -> Dict[str, str]:
headers = {
"Accept": "application/json",
"X-With-Generated-Alt": "true",
"X-With-Links-Summary": "true",
"X-With-Images-Summary": "true",
}
if self.api_key:
headers["Authorization"] = f"Bearer {self.api_key}"
if self.config.api_key:
headers["Authorization"] = f"Bearer {self.config.api_key}"
if self.config.timeout:
headers["X-Timeout"] = str(self.config.timeout)

return headers

def _truncate_content(self, content: str) -> str:
"""Truncate content to the maximum allowed length."""
if len(content) > self.max_content_length:
truncated = content[: self.max_content_length]
if len(content) > self.config.max_content_length:
truncated = content[: self.config.max_content_length]
return truncated + "... (content truncated)"
return content
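
For context, a minimal usage sketch of the refactored `JinaReaderTools` constructor. The parameter names and defaults are taken from the diff above; the surrounding `Agent` wiring mirrors the cookbook examples in this commit and is an assumption, not part of the change itself.

```python
# Sketch only: exercises the JinaReaderTools signature introduced in this diff.
from phi.agent import Agent
from phi.model.openai import OpenAIChat
from phi.tools.jina_tools import JinaReaderTools

reader = JinaReaderTools(
    max_content_length=10000,  # new default in JinaReaderToolsConfig
    timeout=30,                # sent as the X-Timeout header when set
    read_url=True,             # registers the read_url tool (default)
    search_query=True,         # opt in to search; disabled by default
)

agent = Agent(
    model=OpenAIChat(id="gpt-4o"),
    tools=[reader],
    show_tool_calls=True,
    markdown=True,
)
agent.print_response("Read https://docs.phidata.com and summarize the page", stream=True)
```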
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "phidata"
version = "2.5.4"
version = "2.5.5"
description = "Build AI Agents with memory, knowledge and tools."
requires-python = ">=3.7"
readme = "README.md"
