Skip to content

Commit

Permalink
Merge branch 'main' into examples/workflows/social-media-content-planner
Browse files Browse the repository at this point in the history
  • Loading branch information
manthanguptaa authored Dec 30, 2024
2 parents 5dc4049 + a4cf57f commit 4749a64
Show file tree
Hide file tree
Showing 20 changed files with 336 additions and 16 deletions.
8 changes: 2 additions & 6 deletions cookbook/agents/14_generate_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,5 @@
images = image_agent.get_images()
if images and isinstance(images, list):
for image_response in images:
image_data = image_response.get("data") # type: ignore
if image_data:
for image in image_data:
image_url = image.get("url") # type: ignore
if image_url:
print(image_url)
image_url = image_response.url
print(image_url)
Empty file added cookbook/chunking/__init__.py
Empty file.
16 changes: 16 additions & 0 deletions cookbook/mysql-init/init.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
-- Schema seeded on first container start via /docker-entrypoint-initdb.d
-- (see the docker run command in cookbook/run_mysql.sh).

-- Create 'users' table
-- Minimal demo user table: surrogate key plus unique login identifiers.
CREATE TABLE IF NOT EXISTS users (
    id INT AUTO_INCREMENT PRIMARY KEY,
    username VARCHAR(50) NOT NULL UNIQUE,
    email VARCHAR(100) NOT NULL UNIQUE,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Create 'products' table
-- Minimal demo catalog table; price keeps 2 decimal places (DECIMAL(10,2)).
CREATE TABLE IF NOT EXISTS products (
    id INT AUTO_INCREMENT PRIMARY KEY,
    name VARCHAR(100) NOT NULL,
    description TEXT,
    price DECIMAL(10,2) NOT NULL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
18 changes: 18 additions & 0 deletions cookbook/providers/ollama/agent_set_client.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
"""Run `pip install yfinance` to install dependencies."""

from ollama import Client as OllamaClient
from phi.agent import Agent, RunResponse # noqa
from phi.model.ollama import Ollama
from phi.playground import Playground, serve_playground_app
from phi.tools.yfinance import YFinanceTools

agent = Agent(
model=Ollama(id="llama3.1:8b", client=OllamaClient()),
tools=[YFinanceTools(stock_price=True)],
markdown=True,
)

app = Playground(agents=[agent]).get_app()

if __name__ == "__main__":
serve_playground_app("agent_set_client:app", reload=True)
6 changes: 6 additions & 0 deletions cookbook/providers/openai/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Generated media from the OpenAI cookbook examples (images and audio).
*.jpg
*.png
*.mp3
*.wav
*.mp4
18 changes: 18 additions & 0 deletions cookbook/providers/openai/audio_input_agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
import base64
import requests
from phi.agent import Agent, RunResponse # noqa
from phi.model.openai import OpenAIChat

# Download the sample WAV file and base64-encode it for the model.
AUDIO_URL = "https://openaiassets.blob.core.windows.net/$web/API/docs/audio/alloy.wav"
resp = requests.get(AUDIO_URL)
resp.raise_for_status()
audio_b64 = base64.b64encode(resp.content).decode("utf-8")

# Text-only agent: audio goes in, a textual answer comes out.
audio_agent = Agent(
    model=OpenAIChat(id="gpt-4o-audio-preview", modalities=["text"]),
    markdown=True,
)
audio_agent.print_response("What is in this audio?", audio={"data": audio_b64, "format": "wav"})
25 changes: 25 additions & 0 deletions cookbook/providers/openai/audio_output_agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
import base64
import requests
from phi.agent import Agent, RunResponse # noqa
from phi.model.openai import OpenAIChat
from phi.utils.audio import write_audio_to_file

# Download the sample WAV file and base64-encode it for the model.
AUDIO_URL = "https://openaiassets.blob.core.windows.net/$web/API/docs/audio/alloy.wav"
resp = requests.get(AUDIO_URL)
resp.raise_for_status()
audio_b64 = base64.b64encode(resp.content).decode("utf-8")

# Agent configured to reply with both text and audio ("alloy" voice, WAV format).
audio_agent = Agent(
    model=OpenAIChat(
        id="gpt-4o-audio-preview", modalities=["text", "audio"], audio={"voice": "alloy", "format": "wav"}
    ),
    markdown=True,
)
audio_agent.print_response("What is in this audio?", audio={"data": audio_b64, "format": "wav"})

# Persist the audio portion of the reply, if any, to disk.
response_audio = audio_agent.run_response.response_audio
if response_audio is not None and "data" in response_audio:
    write_audio_to_file(audio=response_audio["data"], filename="tmp/dog.wav")
Empty file added cookbook/readers/__init__.py
Empty file.
10 changes: 10 additions & 0 deletions cookbook/run_mysql.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Start a local MySQL 8.0 container for the cookbook examples.
# User/password/database are all "phi"; data persists in the named volume
# "mysql_data"; SQL files in cookbook/mysql-init run once on first start
# via /docker-entrypoint-initdb.d. Run from the repository root.
docker run -d \
  -e MYSQL_ROOT_PASSWORD=phi \
  -e MYSQL_DATABASE=phi \
  -e MYSQL_USER=phi \
  -e MYSQL_PASSWORD=phi \
  -p 3306:3306 \
  -v mysql_data:/var/lib/mysql \
  -v "$(pwd)"/cookbook/mysql-init:/docker-entrypoint-initdb.d \
  --name mysql \
  mysql:8.0
1 change: 0 additions & 1 deletion cookbook/tools/composio_tools.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
from phi.agent import Agent
from composio_phidata import Action, ComposioToolSet # type: ignore


toolset = ComposioToolSet()
composio_tools = toolset.get_tools(actions=[Action.GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER])

Expand Down
213 changes: 213 additions & 0 deletions cookbook/workflows/startup_idea_validator.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,213 @@
"""
1. Install dependencies using: `pip install openai exa_py sqlalchemy phidata`
2. Run the script using: `python cookbook/workflows/blog_post_generator.py`
"""

import json
from typing import Optional, Iterator

from pydantic import BaseModel, Field

from phi.agent import Agent
from phi.model.openai import OpenAIChat
from phi.tools.googlesearch import GoogleSearch
from phi.workflow import Workflow, RunResponse, RunEvent
from phi.storage.workflow.sqlite import SqlWorkflowStorage
from phi.utils.pprint import pprint_run_response
from phi.utils.log import logger


class IdeaClarification(BaseModel):
    """Structured output of the idea-clarifier agent: a refined view of the idea."""

    originality: str = Field(..., description="Originality of the idea.")
    mission: str = Field(..., description="Mission of the company.")
    objectives: str = Field(..., description="Objectives of the company.")


class MarketResearch(BaseModel):
    """Structured output of the market-research agent (TAM/SAM/SOM + segments)."""

    total_addressable_market: str = Field(..., description="Total addressable market (TAM).")
    serviceable_available_market: str = Field(..., description="Serviceable available market (SAM).")
    serviceable_obtainable_market: str = Field(..., description="Serviceable obtainable market (SOM).")
    target_customer_segments: str = Field(..., description="Target customer segments.")


class StartupIdeaValidator(Workflow):
    """Validate a startup idea in four agent-driven stages.

    Pipeline: clarify the idea -> research the market -> analyse competitors
    -> compile a single report. Each stage runs a dedicated agent; a failed
    stage short-circuits the workflow with an explanatory RunResponse.
    """

    # Stage 1: refine the raw idea into originality / mission / objectives.
    idea_clarifier_agent: Agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions=[
            "Given a user's startup idea, its your goal to refine that idea. ",
            "Evaluates the originality of the idea by comparing it with existing concepts. ",
            "Define the mission and objectives of the startup.",
        ],
        add_history_to_messages=True,
        add_datetime_to_instructions=True,
        response_model=IdeaClarification,
        structured_outputs=True,
        debug_mode=False,
    )

    # Stage 2: estimate TAM/SAM/SOM and target segments, with web search.
    market_research_agent: Agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[GoogleSearch()],
        instructions=[
            "You are provided with a startup idea and the company's mission and objectives. ",
            "Estimate the total addressable market (TAM), serviceable available market (SAM), and serviceable obtainable market (SOM). ",
            "Define target customer segments and their characteristics. ",
            "Search the web for resources if you need to.",
        ],
        add_history_to_messages=True,
        add_datetime_to_instructions=True,
        response_model=MarketResearch,
        structured_outputs=True,
        debug_mode=False,
    )

    # Stage 3: SWOT-style competitor analysis, returned as free-form markdown.
    competitor_analysis_agent: Agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[GoogleSearch()],
        instructions=[
            "You are provided with a startup idea and some market research related to the idea. ",
            "Identify existing competitors in the market. ",
            "Perform Strengths, Weaknesses, Opportunities, and Threats (SWOT) analysis for each competitor. ",
            "Assess the startup’s potential positioning relative to competitors.",
        ],
        add_history_to_messages=True,
        add_datetime_to_instructions=True,
        markdown=True,
        debug_mode=False,
    )

    # Stage 4: summarise all gathered data into the final report.
    report_agent: Agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions=[
            "You are provided with a startup idea and other data about the idea. ",
            "Summarise everything into a single report.",
        ],
        add_history_to_messages=True,
        add_datetime_to_instructions=True,
        markdown=True,
        debug_mode=False,
    )

    def get_idea_clarification(self, startup_idea: str) -> Optional[IdeaClarification]:
        """Run the clarifier agent; return the structured result or None on failure."""
        try:
            response: RunResponse = self.idea_clarifier_agent.run(startup_idea)

            # An empty response is a failure — do not pass it downstream.
            if not response or not response.content:
                logger.warning("Empty Idea Clarification response")
                return None
            # structured_outputs should guarantee the model type; bail out if not.
            if not isinstance(response.content, IdeaClarification):
                logger.warning("Invalid response type")
                return None

            return response.content

        except Exception as e:
            logger.warning(f"Failed: {str(e)}")

        return None

    def get_market_research(self, startup_idea: str, idea_clarification: IdeaClarification) -> Optional[MarketResearch]:
        """Run the market-research agent on the idea plus its clarification.

        Returns the structured MarketResearch, or None on any failure.
        """
        agent_input = {"startup_idea": startup_idea, **idea_clarification.model_dump()}

        try:
            response: RunResponse = self.market_research_agent.run(json.dumps(agent_input, indent=4))

            # An empty response is a failure — do not pass it downstream.
            if not response or not response.content:
                logger.warning("Empty Market Research response")
                return None

            # structured_outputs should guarantee the model type; bail out if not.
            if not isinstance(response.content, MarketResearch):
                logger.warning("Invalid response type")
                return None

            return response.content

        except Exception as e:
            logger.warning(f"Failed: {str(e)}")

        return None

    def get_competitor_analysis(self, startup_idea: str, market_research: MarketResearch) -> Optional[str]:
        """Run the competitor-analysis agent; return its markdown report or None."""
        agent_input = {"startup_idea": startup_idea, **market_research.model_dump()}

        try:
            response: RunResponse = self.competitor_analysis_agent.run(json.dumps(agent_input, indent=4))

            # An empty response is a failure — do not pass it downstream.
            if not response or not response.content:
                logger.warning("Empty Competitor Analysis response")
                return None

            return response.content

        except Exception as e:
            logger.warning(f"Failed: {str(e)}")

        return None

    def run(self, startup_idea: str) -> Iterator[RunResponse]:
        """Execute the full validation pipeline, yielding the final report.

        Each stage that fails ends the workflow early with an explanatory
        message, so later stages never receive None inputs.
        """
        logger.info(f"Generating a startup validation report for: {startup_idea}")

        # Clarify and quantify the idea
        idea_clarification: Optional[IdeaClarification] = self.get_idea_clarification(startup_idea)

        if idea_clarification is None:
            yield RunResponse(
                event=RunEvent.workflow_completed,
                content=f"Sorry, could not even clarify the idea: {startup_idea}",
            )
            return

        # Do some market research
        market_research: Optional[MarketResearch] = self.get_market_research(startup_idea, idea_clarification)

        if market_research is None:
            yield RunResponse(
                event=RunEvent.workflow_completed,
                content="Market research failed",
            )
            return

        # Analyse the competition
        competitor_analysis: Optional[str] = self.get_competitor_analysis(startup_idea, market_research)

        # Previously a None here was silently passed into the report; treat it
        # as a failure like the other stages.
        if competitor_analysis is None:
            yield RunResponse(
                event=RunEvent.workflow_completed,
                content="Competitor analysis failed",
            )
            return

        # Compile the final report
        final_response: RunResponse = self.report_agent.run(
            json.dumps(
                {
                    "startup_idea": startup_idea,
                    **idea_clarification.model_dump(),
                    **market_research.model_dump(),
                    "competitor_analysis_report": competitor_analysis,
                },
                indent=4,
            )
        )

        yield RunResponse(content=final_response.content, event=RunEvent.workflow_completed)


# Interactive entry point: prompt for an idea and stream the validation report.
if __name__ == "__main__":
    from rich.prompt import Prompt

    # Ask the user for their idea (with a whimsical default).
    startup_idea = Prompt.ask(
        "[bold]What is your startup idea?[/bold]\n✨",
        default="A marketplace for Christmas Ornaments made from leather",
    )

    # Slugify the idea so it can serve as part of the session id.
    idea_slug = startup_idea.lower().replace(" ", "-")

    validator = StartupIdeaValidator(
        description="Startup Idea Validator",
        session_id=f"validate-startup-idea-{idea_slug}",
        storage=SqlWorkflowStorage(
            table_name="validate_startup_ideas_workflow",
            db_file="tmp/workflows.db",
        ),
        debug_mode=True,
    )

    report_stream: Iterator[RunResponse] = validator.run(startup_idea=startup_idea)

    pprint_run_response(report_stream, markdown=True)
2 changes: 1 addition & 1 deletion phi/document/reader/csv_reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ def read(self, file: Union[Path, IO[Any]], delimiter: str = ",", quotechar: str
else:
logger.info(f"Reading uploaded file: {file.name}")
file.seek(0)
file_content = io.StringIO(file.read().decode("utf-8"))
file_content = io.StringIO(file.read().decode("utf-8")) # type: ignore

csv_name = Path(file.name).stem if isinstance(file, Path) else file.name.split(".")[0]
csv_content = ""
Expand Down
2 changes: 1 addition & 1 deletion phi/llm/ollama/hermes.py
Original file line number Diff line number Diff line change
Expand Up @@ -258,7 +258,7 @@ def response_stream(self, messages: List[Message]) -> Iterator[str]:
# logger.info(f"Ollama partial response: {response}")
# logger.info(f"Ollama partial response type: {type(response)}")
response_message: Optional[dict] = response.get("message")
response_content = response_message.get("content") if response_message else None
response_content: str = response_message.get("content", "") if response_message else ""
# logger.info(f"Ollama partial response content: {response_content}")

# Add response content to assistant message
Expand Down
2 changes: 1 addition & 1 deletion phi/llm/ollama/tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -259,7 +259,7 @@ def response_stream(self, messages: List[Message]) -> Iterator[str]:
# logger.info(f"Ollama partial response: {response}")
# logger.info(f"Ollama partial response type: {type(response)}")
response_message: Optional[dict] = response.get("message")
response_content = response_message.get("content") if response_message else None
response_content: str = response_message.get("content", "") if response_message else ""
# logger.info(f"Ollama partial response content: {response_content}")

# Add response content to assistant message
Expand Down
4 changes: 4 additions & 0 deletions phi/model/ollama/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -722,3 +722,7 @@ async def aresponse_stream(self, messages: List[Message]) -> Any:
async for post_tool_call_response in self.ahandle_post_tool_call_messages_stream(messages=messages):
yield post_tool_call_response
logger.debug("---------- Ollama Async Response End ----------")

def model_copy(self, *, update: Optional[dict[str, Any]] = None, deep: bool = False) -> "Ollama":
    """Return a copy of this model, re-attaching the client excluded from the dump.

    Args:
        update: Optional field overrides applied to the copy. The previous
            implementation silently ignored this, so ``model_copy(update=...)``
            had no effect — the overrides are now honoured.
        deep: Accepted for signature compatibility with pydantic's
            ``model_copy``; the copy is always rebuilt from ``model_dump``,
            so this flag changes nothing here.
    """
    fields = self.model_dump(exclude={"client"})
    if update:
        fields.update(update)
    # Allow `update` to supply a replacement client; otherwise share the current one.
    client = fields.pop("client", self.client)
    return Ollama(**fields, client=client)
7 changes: 6 additions & 1 deletion phi/playground/router.py
Original file line number Diff line number Diff line change
Expand Up @@ -553,8 +553,13 @@ async def run_workflow(workflow_id: str, body: WorkflowRunRequest):
if workflow is None:
raise HTTPException(status_code=404, detail="Workflow not found")

if body.session_id is not None:
logger.debug(f"Continuing session: {body.session_id}")
else:
logger.debug("Creating new session")

# Create a new instance of this workflow
new_workflow_instance = workflow.deep_copy(update={"workflow_id": workflow_id})
new_workflow_instance = workflow.deep_copy(update={"workflow_id": workflow_id, "session_id": body.session_id})
new_workflow_instance.user_id = body.user_id

# Return based on the response type
Expand Down
1 change: 1 addition & 0 deletions phi/playground/schemas.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,3 +68,4 @@ class WorkflowRenameRequest(BaseModel):
class WorkflowRunRequest(BaseModel):
    """Payload for the playground's "run workflow" endpoint."""

    # Arguments for the workflow run — presumably forwarded to Workflow.run();
    # TODO confirm against the router handler.
    input: Dict[str, Any]
    user_id: Optional[str] = None
    # When set, the run continues an existing workflow session instead of
    # creating a new one (the router passes it into workflow.deep_copy).
    session_id: Optional[str] = None
Loading

0 comments on commit 4749a64

Please sign in to comment.