diff --git a/cookbook/agents/README.md b/cookbook/agents/README.md
deleted file mode 100644
index 3d00f48565..0000000000
--- a/cookbook/agents/README.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# Building Agents with gpt-4o
-
-This cookbook shows how to build agents with gpt-4o
-
-> Note: Fork and clone this repository if needed
-
-### 1. Create a virtual environment
-
-```shell
-python3 -m venv ~/.venvs/aienv
-source ~/.venvs/aienv/bin/activate
-```
-
-### 2. Install libraries
-
-```shell
-pip install -r cookbook/agents/requirements.txt
-```
-
-### 3. Export credentials
-
-- We use gpt-4o as the llm, so export your OpenAI API Key
-
-```shell
-export OPENAI_API_KEY=***
-```
-
-- To use Exa for research, export your EXA_API_KEY (get it from [here](https://dashboard.exa.ai/api-keys))
-
-```shell
-export EXA_API_KEY=xxx
-```
-
-### 4. Run PgVector
-
-We use PgVector to provide long-term memory and knowledge to the LLM OS.
-Please install [docker desktop](https://docs.docker.com/desktop/install/mac-install/) and run PgVector using either the helper script or the `docker run` command.
-
-- Run using a helper script
-
-```shell
-./cookbook/run_pgvector.sh
-```
-
-- OR run using the docker run command
-
-```shell
-docker run -d \
-  -e POSTGRES_DB=ai \
-  -e POSTGRES_USER=ai \
-  -e POSTGRES_PASSWORD=ai \
-  -e PGDATA=/var/lib/postgresql/data/pgdata \
-  -v pgvolume:/var/lib/postgresql/data \
-  -p 5532:5432 \
-  --name pgvector \
-  phidata/pgvector:16
-```
-
-### 5. Run the App
-
-```shell
-streamlit run cookbook/agents/app.py
-```
-
-- Open [localhost:8501](http://localhost:8501) to view your LLM OS.
-
-### 6. Message on [discord](https://discord.gg/4MtYHHrgA8) if you have any questions
-
-### 7. Star ⭐️ the project if you like it.
-
-### Share with your friends: https://phidata.link/agents
diff --git a/cookbook/agents/agent.py b/cookbook/agents/agent.py
deleted file mode 100644
index 5b8a91e304..0000000000
--- a/cookbook/agents/agent.py
+++ /dev/null
@@ -1,294 +0,0 @@
-import json
-from pathlib import Path
-from typing import Optional
-from textwrap import dedent
-from typing import List
-
-from phi.assistant import Assistant
-from phi.tools import Toolkit
-from phi.tools.exa import ExaTools
-from phi.tools.calculator import Calculator
-from phi.tools.duckduckgo import DuckDuckGo
-from phi.tools.yfinance import YFinanceTools
-from phi.tools.file import FileTools
-from phi.llm.openai import OpenAIChat
-from phi.knowledge import AssistantKnowledge
-from phi.embedder.openai import OpenAIEmbedder
-from phi.assistant.duckdb import DuckDbAssistant
-from phi.assistant.python import PythonAssistant
-from phi.storage.assistant.postgres import PgAssistantStorage
-from phi.utils.log import logger
-from phi.vectordb.pgvector import PgVector2
-
-db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
-cwd = Path(__file__).parent.resolve()
-scratch_dir = cwd.joinpath("scratch")
-if not scratch_dir.exists():
-    scratch_dir.mkdir(exist_ok=True, parents=True)
-
-
-def get_agent(
-    llm_id: str = "gpt-4o",
-    calculator: bool = False,
-    ddg_search: bool = False,
-    file_tools: bool = False,
-    finance_tools: bool = False,
-    data_analyst: bool = False,
-    python_assistant: bool = False,
-    research_assistant: bool = False,
-    investment_assistant: bool = False,
-    user_id: Optional[str] = None,
-    run_id: Optional[str] = None,
-    debug_mode: bool = True,
-) -> Assistant:
-    logger.info(f"-*- Creating {llm_id} Agent -*-")
-
-    # Add tools available to the Agent
-    tools: List[Toolkit] = []
-    extra_instructions: List[str] = []
-    if calculator:
-        tools.append(
-            Calculator(
-                add=True,
-                subtract=True,
-                multiply=True,
-                divide=True,
-                exponentiate=True,
-                factorial=True,
-                is_prime=True,
-                square_root=True,
-            )
-        )
-    if ddg_search:
-        tools.append(DuckDuckGo(fixed_max_results=3))
-    if finance_tools:
-        tools.append(
-            YFinanceTools(stock_price=True, company_info=True, analyst_recommendations=True, company_news=True)
-        )
-    if file_tools:
-        tools.append(FileTools(base_dir=cwd))
-        extra_instructions.append(
-            "You can use the `read_file` tool to read a file, `save_file` to save a file, and `list_files` to list files in the working directory."
-        )
-
-    # Add team members available to the Agent
-    team: List[Assistant] = []
-    if data_analyst:
-        _data_analyst = DuckDbAssistant(
-            name="Data Analyst",
-            llm=OpenAIChat(model=llm_id),
-            role="Analyze movie data and provide insights",
-            semantic_model=json.dumps(
-                {
-                    "tables": [
-                        {
-                            "name": "movies",
-                            "description": "CSV of my favorite movies.",
-                            "path": "https://phidata-public.s3.amazonaws.com/demo_data/IMDB-Movie-Data.csv",
-                        }
-                    ]
-                }
-            ),
-            base_dir=scratch_dir,
-        )
-        team.append(_data_analyst)
-        extra_instructions.append(
-            "To answer questions about my favorite movies, delegate the task to the `Data Analyst`."
-        )
-    if python_assistant:
-        _python_assistant = PythonAssistant(
-            name="Python Assistant",
-            llm=OpenAIChat(model=llm_id),
-            role="Write and run python code",
-            pip_install=True,
-            charting_libraries=["streamlit"],
-            base_dir=scratch_dir,
-        )
-        team.append(_python_assistant)
-        extra_instructions.append("To write and run python code, delegate the task to the `Python Assistant`.")
-    if research_assistant:
-        _research_assistant = Assistant(
-            name="Research Assistant",
-            role="Write a research report on a given topic",
-            llm=OpenAIChat(model=llm_id),
-            description="You are a Senior New York Times researcher tasked with writing a cover story research report.",
-            instructions=[
-                "For a given topic, use the `search_exa` to get the top 10 search results.",
-                "Carefully read the results and generate a final - NYT cover story worthy report in the <report_format> provided below.",
-                "Make your report engaging, informative, and well-structured.",
-                "Remember: you are writing for the New York Times, so the quality of the report is important.",
-            ],
-            expected_output=dedent(
-                """\
-            An engaging, informative, and well-structured report in the following format:
-            <report_format>
-            ## Title
-
-            - **Overview** Brief introduction of the topic.
-            - **Importance** Why is this topic significant now?
-
-            ### Section 1
-            - **Detail 1**
-            - **Detail 2**
-
-            ### Section 2
-            - **Detail 1**
-            - **Detail 2**
-
-            ## Conclusion
-            - **Summary of report:** Recap of the key findings from the report.
-            - **Implications:** What these findings mean for the future.
-
-            ## References
-            - [Reference 1](Link to Source)
-            - [Reference 2](Link to Source)
-            </report_format>
-            """
-            ),
-            tools=[ExaTools(num_results=5, text_length_limit=1000)],
-            # This setting tells the LLM to format messages in markdown
-            markdown=True,
-            add_datetime_to_instructions=True,
-            debug_mode=debug_mode,
-        )
-        team.append(_research_assistant)
-        extra_instructions.append(
-            "To write a research report, delegate the task to the `Research Assistant`. "
-            "Return the report in the <report_format> to the user as is, without any additional text like 'here is the report'."
-        )
-    if investment_assistant:
-        _investment_assistant = Assistant(
-            name="Investment Assistant",
-            role="Write a investment report on a given company (stock) symbol",
-            llm=OpenAIChat(model=llm_id),
-            description="You are a Senior Investment Analyst for Goldman Sachs tasked with writing an investment report for a very important client.",
-            instructions=[
-                "For a given stock symbol, get the stock price, company information, analyst recommendations, and company news",
-                "Carefully read the research and generate a final - Goldman Sachs worthy investment report in the <report_format> provided below.",
-                "Provide thoughtful insights and recommendations based on the research.",
-                "When you share numbers, make sure to include the units (e.g., millions/billions) and currency.",
-                "REMEMBER: This report is for a very important client, so the quality of the report is important.",
-            ],
-            expected_output=dedent(
-                """\
-            <report_format>
-            ## [Company Name]: Investment Report
-
-            ### **Overview**
-            {give a brief introduction of the company and why the user should read this report}
-            {make this section engaging and create a hook for the reader}
-
-            ### Core Metrics
-            {provide a summary of core metrics and show the latest data}
-            - Current price: {current price}
-            - 52-week high: {52-week high}
-            - 52-week low: {52-week low}
-            - Market Cap: {Market Cap} in billions
-            - P/E Ratio: {P/E Ratio}
-            - Earnings per Share: {EPS}
-            - 50-day average: {50-day average}
-            - 200-day average: {200-day average}
-            - Analyst Recommendations: {buy, hold, sell} (number of analysts)
-
-            ### Financial Performance
-            {analyze the company's financial performance}
-
-            ### Growth Prospects
-            {analyze the company's growth prospects and future potential}
-
-            ### News and Updates
-            {summarize relevant news that can impact the stock price}
-
-            ### [Summary]
-            {give a summary of the report and what are the key takeaways}
-
-            ### [Recommendation]
-            {provide a recommendation on the stock along with a thorough reasoning}
-
-            </report_format>
-            """
-            ),
-            tools=[YFinanceTools(stock_price=True, company_info=True, analyst_recommendations=True, company_news=True)],
-            # This setting tells the LLM to format messages in markdown
-            markdown=True,
-            add_datetime_to_instructions=True,
-            debug_mode=debug_mode,
-        )
-        team.append(_investment_assistant)
-        extra_instructions.extend(
-            [
-                "To get an investment report on a stock, delegate the task to the `Investment Assistant`. "
-                "Return the report in the <report_format> to the user without any additional text like 'here is the report'.",
-                "Answer any questions they may have using the information in the report.",
-                "Never provide investment advise without the investment report.",
-            ]
-        )
-
-    # Create the Agent
-    agent = Assistant(
-        name="agent",
-        run_id=run_id,
-        user_id=user_id,
-        llm=OpenAIChat(model=llm_id),
-        description=dedent(
-            """\
-        You are a powerful AI Agent called `Optimus Prime v7`.
-        You have access to a set of tools and a team of AI Assistants at your disposal.
-        Your goal is to assist the user in the best way possible.\
-        """
-        ),
-        instructions=[
-            "When the user sends a message, first **think** and determine if:\n"
-            " - You can answer by using a tool available to you\n"
-            " - You need to search the knowledge base\n"
-            " - You need to search the internet\n"
-            " - You need to delegate the task to a team member\n"
-            " - You need to ask a clarifying question",
-            "If the user asks about a topic, first ALWAYS search your knowledge base using the `search_knowledge_base` tool.",
-            "If you dont find relevant information in your knowledge base, use the `duckduckgo_search` tool to search the internet.",
-            "If the user asks to summarize the conversation or if you need to reference your chat history with the user, use the `get_chat_history` tool.",
-            "If the users message is unclear, ask clarifying questions to get more information.",
-            "Carefully read the information you have gathered and provide a clear and concise answer to the user.",
-            "Do not use phrases like 'based on my knowledge' or 'depending on the information'.",
-            "You can delegate tasks to an AI Assistant in your team depending of their role and the tools available to them.",
-        ],
-        extra_instructions=extra_instructions,
-        # Add long-term memory to the Agent backed by a PostgreSQL database
-        storage=PgAssistantStorage(table_name="agent_runs", db_url=db_url),
-        # Add a knowledge base to the Agent
-        knowledge_base=AssistantKnowledge(
-            vector_db=PgVector2(
-                db_url=db_url,
-                collection="agent_documents",
-                embedder=OpenAIEmbedder(model="text-embedding-3-small", dimensions=1536),
-            ),
-            # 3 references are added to the prompt when searching the knowledge base
-            num_documents=3,
-        ),
-        # Add selected tools to the Agent
-        tools=tools,
-        # Add selected team members to the Agent
-        team=team,
-        # Show tool calls in the chat
-        show_tool_calls=True,
-        # This setting gives the LLM a tool to search the knowledge base for information
-        search_knowledge=True,
-        # This setting gives the LLM a tool to get chat history
-        read_chat_history=True,
-        # This setting adds chat history to the messages
-        add_chat_history_to_messages=True,
-        # This setting adds 4 previous messages from chat history to the messages sent to the LLM
-        num_history_messages=4,
-        # This setting tells the LLM to format messages in markdown
-        markdown=True,
-        # This setting adds the current datetime to the instructions
-        add_datetime_to_instructions=True,
-        # Add an introductory Assistant message
-        introduction=dedent(
-            """\
-        Hi, I'm Optimus Prime v7, your powerful AI Assistant. Send me on my mission boss :statue_of_liberty:\
-        """
-        ),
-        debug_mode=debug_mode,
-    )
-    return agent
diff --git a/cookbook/agents/app.py b/cookbook/agents/app.py
deleted file mode 100644
index 7dbc59cd67..0000000000
--- a/cookbook/agents/app.py
+++ /dev/null
@@ -1,301 +0,0 @@
-from typing import List
-
-import nest_asyncio
-import streamlit as st
-from phi.assistant import Assistant
-from phi.document import Document
-from phi.document.reader.pdf import PDFReader
-from phi.document.reader.website import WebsiteReader
-from phi.utils.log import logger
-
-from agent import get_agent  # type: ignore
-
-nest_asyncio.apply()
-
-st.set_page_config(
-    page_title="AI Agents",
-    page_icon=":orange_heart:",
-)
-st.title("AI Agents")
-st.markdown("##### :orange_heart: built using [phidata](https://github.com/phidatahq/phidata)")
-
-
-def main() -> None:
-    # Get LLM Model
-    llm_id = st.sidebar.selectbox("Select LLM", options=["gpt-4o", "gpt-4-turbo"]) or "gpt-4o"
-    # Set llm_id in session state
-    if "llm_id" not in st.session_state:
-        st.session_state["llm_id"] = llm_id
-    # Restart the assistant if llm_id changes
-    elif st.session_state["llm_id"] != llm_id:
-        st.session_state["llm_id"] = llm_id
-        restart_assistant()
-
-    # Sidebar checkboxes for selecting tools
-    st.sidebar.markdown("### Select Tools")
-
-    # Enable Calculator
-    if "calculator_enabled" not in st.session_state:
-        st.session_state["calculator_enabled"] = True
-    # Get calculator_enabled from session state if set
-    calculator_enabled = st.session_state["calculator_enabled"]
-    # Checkbox for enabling calculator
-    calculator = st.sidebar.checkbox("Calculator", value=calculator_enabled, help="Enable calculator.")
-    if calculator_enabled != calculator:
-        st.session_state["calculator_enabled"] = calculator
-        calculator_enabled = calculator
-        restart_assistant()
-
-    # Enable file tools
-    if "file_tools_enabled" not in st.session_state:
-        st.session_state["file_tools_enabled"] = True
-    # Get file_tools_enabled from session state if set
-    file_tools_enabled = st.session_state["file_tools_enabled"]
-    # Checkbox for enabling shell tools
-    file_tools = st.sidebar.checkbox("File Tools", value=file_tools_enabled, help="Enable file tools.")
-    if file_tools_enabled != file_tools:
-        st.session_state["file_tools_enabled"] = file_tools
-        file_tools_enabled = file_tools
-        restart_assistant()
-
-    # Enable Web Search via DuckDuckGo
-    if "ddg_search_enabled" not in st.session_state:
-        st.session_state["ddg_search_enabled"] = True
-    # Get ddg_search_enabled from session state if set
-    ddg_search_enabled = st.session_state["ddg_search_enabled"]
-    # Checkbox for enabling web search
-    ddg_search = st.sidebar.checkbox("Web Search", value=ddg_search_enabled, help="Enable web search using DuckDuckGo.")
-    if ddg_search_enabled != ddg_search:
-        st.session_state["ddg_search_enabled"] = ddg_search
-        ddg_search_enabled = ddg_search
-        restart_assistant()
-
-    # Enable finance tools
-    if "finance_tools_enabled" not in st.session_state:
-        st.session_state["finance_tools_enabled"] = True
-    # Get finance_tools_enabled from session state if set
-    finance_tools_enabled = st.session_state["finance_tools_enabled"]
-    # Checkbox for enabling shell tools
-    finance_tools = st.sidebar.checkbox("Yahoo Finance", value=finance_tools_enabled, help="Enable finance tools.")
-    if finance_tools_enabled != finance_tools:
-        st.session_state["finance_tools_enabled"] = finance_tools
-        finance_tools_enabled = finance_tools
-        restart_assistant()
-
-    # Sidebar checkboxes for selecting team members
-    st.sidebar.markdown("### Select Agent Team")
-
-    # Enable Data Analyst
-    if "data_analyst_enabled" not in st.session_state:
-        st.session_state["data_analyst_enabled"] = True
-    # Get data_analyst_enabled from session state if set
-    data_analyst_enabled = st.session_state["data_analyst_enabled"]
-    # Checkbox for enabling web search
-    data_analyst = st.sidebar.checkbox(
-        "Data Analyst",
-        value=data_analyst_enabled,
-        help="Enable the Data Analyst assistant for data related queries.",
-    )
-    if data_analyst_enabled != data_analyst:
-        st.session_state["data_analyst_enabled"] = data_analyst
-        data_analyst_enabled = data_analyst
-        restart_assistant()
-
-    # Enable Python Assistant
-    if "python_assistant_enabled" not in st.session_state:
-        st.session_state["python_assistant_enabled"] = True
-    # Get python_assistant_enabled from session state if set
-    python_assistant_enabled = st.session_state["python_assistant_enabled"]
-    # Checkbox for enabling web search
-    python_assistant = st.sidebar.checkbox(
-        "Python Assistant",
-        value=python_assistant_enabled,
-        help="Enable the Python Assistant for writing and running python code.",
-    )
-    if python_assistant_enabled != python_assistant:
-        st.session_state["python_assistant_enabled"] = python_assistant
-        python_assistant_enabled = python_assistant
-        restart_assistant()
-
-    # Enable Research Assistant
-    if "research_assistant_enabled" not in st.session_state:
-        st.session_state["research_assistant_enabled"] = True
-    # Get research_assistant_enabled from session state if set
-    research_assistant_enabled = st.session_state["research_assistant_enabled"]
-    # Checkbox for enabling web search
-    research_assistant = st.sidebar.checkbox(
-        "Research Assistant",
-        value=research_assistant_enabled,
-        help="Enable the research assistant (uses Exa).",
-    )
-    if research_assistant_enabled != research_assistant:
-        st.session_state["research_assistant_enabled"] = research_assistant
-        research_assistant_enabled = research_assistant
-        restart_assistant()
-
-    # Enable Investment Assistant
-    if "investment_assistant_enabled" not in st.session_state:
-        st.session_state["investment_assistant_enabled"] = True
-    # Get investment_assistant_enabled from session state if set
-    investment_assistant_enabled = st.session_state["investment_assistant_enabled"]
-    # Checkbox for enabling web search
-    investment_assistant = st.sidebar.checkbox(
-        "Investment Assistant",
-        value=investment_assistant_enabled,
-        help="Enable the investment assistant. NOTE: This is not financial advice.",
-    )
-    if investment_assistant_enabled != investment_assistant:
-        st.session_state["investment_assistant_enabled"] = investment_assistant
-        investment_assistant_enabled = investment_assistant
-        restart_assistant()
-
-    # Get the agent
-    agent: Assistant
-    if "agent" not in st.session_state or st.session_state["agent"] is None:
-        logger.info(f"---*--- Creating {llm_id} Agent ---*---")
-        agent = get_agent(
-            llm_id=llm_id,
-            calculator=calculator_enabled,
-            ddg_search=ddg_search_enabled,
-            file_tools=file_tools_enabled,
-            finance_tools=finance_tools_enabled,
-            data_analyst=data_analyst_enabled,
-            python_assistant=python_assistant_enabled,
-            research_assistant=research_assistant_enabled,
-            investment_assistant=investment_assistant_enabled,
-        )
-        st.session_state["agent"] = agent
-    else:
-        agent = st.session_state["agent"]
-
-    # Create assistant run (i.e. log to database) and save run_id in session state
-    try:
-        st.session_state["agent_run_id"] = agent.create_run()
-    except Exception:
-        st.warning("Could not create Agent run, is the database running?")
-        return
-
-    # Load existing messages
-    assistant_chat_history = agent.memory.get_chat_history()
-    if len(assistant_chat_history) > 0:
-        logger.debug("Loading chat history")
-        st.session_state["messages"] = assistant_chat_history
-    else:
-        logger.debug("No chat history found")
-        st.session_state["messages"] = [{"role": "assistant", "content": "Ask me questions..."}]
-
-    # Prompt for user input
-    if prompt := st.chat_input():
-        st.session_state["messages"].append({"role": "user", "content": prompt})
-
-    # Display existing chat messages
-    for message in st.session_state["messages"]:
-        if message["role"] == "system":
-            continue
-        with st.chat_message(message["role"]):
-            st.write(message["content"])
-
-    # If last message is from a user, generate a new response
-    last_message = st.session_state["messages"][-1]
-    if last_message.get("role") == "user":
-        question = last_message["content"]
-        with st.chat_message("assistant"):
-            response = ""
-            resp_container = st.empty()
-            for delta in agent.run(question):
-                response += delta  # type: ignore
-                resp_container.markdown(response)
-            st.session_state["messages"].append({"role": "assistant", "content": response})
-
-    # Load Agent knowledge base
-    if agent.knowledge_base:
-        # -*- Add websites to knowledge base
-        if "url_scrape_key" not in st.session_state:
-            st.session_state["url_scrape_key"] = 0
-
-        input_url = st.sidebar.text_input(
-            "Add URL to Knowledge Base", type="default", key=st.session_state["url_scrape_key"]
-        )
-        add_url_button = st.sidebar.button("Add URL")
-        if add_url_button:
-            if input_url is not None:
-                alert = st.sidebar.info("Processing URLs...", icon="ℹ️")
-                if f"{input_url}_scraped" not in st.session_state:
-                    scraper = WebsiteReader(max_links=2, max_depth=1)
-                    web_documents: List[Document] = scraper.read(input_url)
-                    if web_documents:
-                        agent.knowledge_base.load_documents(web_documents, upsert=True)
-                    else:
-                        st.sidebar.error("Could not read website")
-                    st.session_state[f"{input_url}_uploaded"] = True
-                alert.empty()
-
-        # Add PDFs to knowledge base
-        if "file_uploader_key" not in st.session_state:
-            st.session_state["file_uploader_key"] = 100
-
-        uploaded_file = st.sidebar.file_uploader(
-            "Add a PDF :page_facing_up:", type="pdf", key=st.session_state["file_uploader_key"]
-        )
-        if uploaded_file is not None:
-            alert = st.sidebar.info("Processing PDF...", icon="🧠")
-            auto_rag_name = uploaded_file.name.split(".")[0]
-            if f"{auto_rag_name}_uploaded" not in st.session_state:
-                reader = PDFReader()
-                auto_rag_documents: List[Document] = reader.read(uploaded_file)
-                if auto_rag_documents:
-                    agent.knowledge_base.load_documents(auto_rag_documents, upsert=True)
-                else:
-                    st.sidebar.error("Could not read PDF")
-                st.session_state[f"{auto_rag_name}_uploaded"] = True
-            alert.empty()
-
-    if agent.knowledge_base and agent.knowledge_base.vector_db:
-        if st.sidebar.button("Clear Knowledge Base"):
-            agent.knowledge_base.vector_db.clear()
-            st.sidebar.success("Knowledge base cleared")
-
-    # Show team member memory
-    if agent.team and len(agent.team) > 0:
-        for team_member in agent.team:
-            if len(team_member.memory.chat_history) > 0:
-                with st.status(f"{team_member.name} Memory", expanded=False, state="complete"):
-                    with st.container():
-                        _team_member_memory_container = st.empty()
-                        _team_member_memory_container.json(team_member.memory.get_llm_messages())
-
-    if agent.storage:
-        agent_run_ids: List[str] = agent.storage.get_all_run_ids()
-        new_agent_run_id = st.sidebar.selectbox("Run ID", options=agent_run_ids)
-        if st.session_state["agent_run_id"] != new_agent_run_id:
-            logger.info(f"---*--- Loading {llm_id} run: {new_agent_run_id} ---*---")
-            st.session_state["agent"] = get_agent(
-                llm_id=llm_id,
-                calculator=calculator_enabled,
-                ddg_search=ddg_search_enabled,
-                file_tools=file_tools_enabled,
-                finance_tools=finance_tools_enabled,
-                data_analyst=data_analyst_enabled,
-                python_assistant=python_assistant_enabled,
-                research_assistant=research_assistant_enabled,
-                investment_assistant=investment_assistant_enabled,
-                run_id=new_agent_run_id,
-            )
-            st.rerun()
-
-    if st.sidebar.button("New Run"):
-        restart_assistant()
-
-
-def restart_assistant():
-    logger.debug("---*--- Restarting Assistant ---*---")
-    st.session_state["agent"] = None
-    st.session_state["agent_run_id"] = None
-    if "url_scrape_key" in st.session_state:
-        st.session_state["url_scrape_key"] += 1
-    if "file_uploader_key" in st.session_state:
-        st.session_state["file_uploader_key"] += 1
-    st.rerun()
-
-
-main()
diff --git a/cookbook/agents/autonomous_rag.py b/cookbook/agents/autonomous_rag.py
new file mode 100644
index 0000000000..5661afc5eb
--- /dev/null
+++ b/cookbook/agents/autonomous_rag.py
@@ -0,0 +1,27 @@
+from rich.pretty import pprint  # noqa
+from phi.agent import Agent, RunResponse  # noqa
+from phi.model.openai import OpenAIChat
+from phi.knowledge.pdf import PDFUrlKnowledgeBase
+from phi.vectordb.pgvector import PgVector, SearchType
+
+db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
+vector_db = PgVector(table_name="recipes", db_url=db_url, search_type=SearchType.hybrid)
+knowledge_base = PDFUrlKnowledgeBase(
+    urls=["https://phi-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf"],
+    vector_db=vector_db,
+)
+# Comment out after first run
+# knowledge_base.load(upsert=True)
+
+agent = Agent(
+    model=OpenAIChat(id="gpt-4o"),
+    knowledge=PDFUrlKnowledgeBase(
+        urls=["https://phi-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf"],
+        vector_db=vector_db,
+    ),
+    # Add a tool to search the knowledge base
+    search_knowledge=True,
+    show_tool_calls=True,
+    markdown=True,
+)
+agent.print_response("How do I make chicken and galangal in Coconut Milk Soup")
diff --git a/cookbook/agents/basic.py b/cookbook/agents/basic.py
index 1f4439a7a4..01d1ace843 100644
--- a/cookbook/agents/basic.py
+++ b/cookbook/agents/basic.py
@@ -1,52 +1,11 @@
-import asyncio  # noqa
-from typing import Iterator  # noqa
-from rich.pretty import pprint  # noqa
 from phi.agent import Agent, RunResponse  # noqa
 from phi.model.openai import OpenAIChat
-from phi.tools.yfinance import YFinanceTools
-from phi.storage.agent.postgres import PgAgentStorage
 
-agent = Agent(
-    model=OpenAIChat(id="gpt-4o"),
-    tools=[YFinanceTools(stock_price=True)],
-    show_tool_calls=True,
-    markdown=True,
-    # debug_mode=True,
-    # monitoring=False,
-    storage=PgAgentStorage(table_name="agent_sessions", db_url="postgresql+psycopg://ai:ai@localhost:5532/ai"),
-)
+agent = Agent(model=OpenAIChat(id="gpt-4o"), instructions=["Respond in a southern tone"], markdown=True)
 
-# run1: RunResponse = agent.run("What is the stock price of NVDA")  # type: ignore
-# pprint(run1)
-# print("------------*******************------------")
-# print(run)
-# print("------------*******************------------")
-# print("------------*******************------------")
-# for m in run.messages:
-#     print("---")
-#     print(m)
-#     print("---")
+# Get the response in a variable
+# run: RunResponse = agent.run("Explain simulation theory")
+# print(run.content)
 
-# run: RunResponse = agent.run("What is the stock price of NVDA")
-# pprint(run.content)
-
-run_stream: Iterator[RunResponse] = agent.run(
-    "What is the stock price of NVDA", stream=True, stream_intermediate_steps=True
-)
-for chunk in run_stream:
-    print("---")
-    pprint(chunk.model_dump(exclude={"messages"}))
-    print("---")
-
-
-# async def main():
-#     run: RunResponse = await agent.arun("What is the stock price of NVDA and TSLA")
-#     pprint(run)
-#     # async for chunk in await agent.arun("What is the stock price of NVDA and TSLA", stream=True):
-#     #     print(chunk.content)
-#
-#
-# asyncio.run(main())
-
-# agent.print_response("What is the stock price of NVDA and TSLA?")
-# agent.print_response("What is the stock price of NVDA and TSLA?", stream=True)
+# Print the response in the terminal
+agent.print_response("Explain simulation theory")
diff --git a/cookbook/agents/finance.py b/cookbook/agents/finance.py
deleted file mode 100644
index 90c66bffc7..0000000000
--- a/cookbook/agents/finance.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from phi.assistant import Assistant
-from phi.llm.openai import OpenAIChat
-from phi.tools.yfinance import YFinanceTools
-
-assistant = Assistant(
-    llm=OpenAIChat(model="gpt-4o"),
-    tools=[YFinanceTools(stock_price=True, analyst_recommendations=True, company_info=True, company_news=True)],
-    show_tool_calls=True,
-)
-assistant.print_response("Compare NVDA to TSLA. Use every tool you have", markdown=True)
diff --git a/cookbook/agents/finance_agent.py b/cookbook/agents/finance_agent.py
new file mode 100644
index 0000000000..e38fff3263
--- /dev/null
+++ b/cookbook/agents/finance_agent.py
@@ -0,0 +1,13 @@
+from phi.agent import Agent
+from phi.model.openai import OpenAIChat
+from phi.tools.yfinance import YFinanceTools
+
+finance_agent = Agent(
+    model=OpenAIChat(id="gpt-4o"),
+    tools=[YFinanceTools(stock_price=True, analyst_recommendations=True, company_info=True, company_news=True)],
+    show_tool_calls=True,
+)
+
+finance_agent.print_response(
+    "Write a report comparing NVDA to TSLA from an investment perspective. Use every tool you have", markdown=True
+)
diff --git a/cookbook/agents/image.py b/cookbook/agents/image.py
deleted file mode 100644
index 78ccc36d09..0000000000
--- a/cookbook/agents/image.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from rich.pretty import pprint  # noqa
-from phi.agent import Agent, RunResponse
-from phi.model.openai import OpenAIChat
-
-agent = Agent(
-    model=OpenAIChat(model="gpt-4o"),
-    markdown=True,
-    debug_mode=True,
-)
-
-# run: RunResponse = agent.run(
-#     "What’s in this image?",
-#     images=[
-#         "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
-#     ],
-# )  # type: ignore
-# run: RunResponse = agent.run(
-#     "What’s in this image?",
-#     images=[
-#         {
-#             "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
-#             "detail": "high",
-#         }
-#     ],
-# )  # type: ignore
-run: RunResponse = agent.run(
-    "What are in these images? Is there any difference between them?",
-    images=[
-        "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
-        "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
-    ],
-)  # type: ignore
-print(run.content)
-# pprint(run)
diff --git a/cookbook/agents/images.py b/cookbook/agents/images.py
new file mode 100644
index 0000000000..5e9d138060
--- /dev/null
+++ b/cookbook/agents/images.py
@@ -0,0 +1,15 @@
+from phi.agent import Agent
+from phi.model.openai import OpenAIChat
+
+agent = Agent(
+    model=OpenAIChat(id="gpt-4o"),
+    markdown=True,
+)
+
+agent.print_response(
+    "What are in these images? Is there any difference between them?",
+    images=[
+        "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
+        "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
+    ],
+)
diff --git a/cookbook/agents/is_9_11_bigger_than_9_9.py b/cookbook/agents/is_9_11_bigger_than_9_9.py
new file mode 100644
index 0000000000..4d12c58c7e
--- /dev/null
+++ b/cookbook/agents/is_9_11_bigger_than_9_9.py
@@ -0,0 +1,13 @@
+from phi.agent import Agent
+from phi.model.openai import OpenAIChat
+from phi.tools.calculator import Calculator
+
+agent = Agent(
+    model=OpenAIChat(id="gpt-4o"),
+    tools=[Calculator(add=True, subtract=True, multiply=True, divide=True)],
+    instructions=["Use the calculator tool for comparisons."],
+    show_tool_calls=True,
+    markdown=True,
+)
+agent.print_response("Is 9.11 bigger than 9.9?")
+agent.print_response("9.11 and 9.9 -- which is bigger?")
diff --git a/cookbook/agents/knowledge.py b/cookbook/agents/knowledge.py
deleted file mode 100644
index d18bdaf750..0000000000
--- a/cookbook/agents/knowledge.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from rich.pretty import pprint  # noqa
-from phi.agent import Agent, RunResponse  # noqa
-from phi.model.openai import OpenAIChat
-from phi.knowledge.pdf import PDFUrlKnowledgeBase
-from phi.vectordb.pgvector import PgVector, SearchType
-
-db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
-vector_db = PgVector(table_name="recipes", db_url=db_url, search_type=SearchType.hybrid)
-
-agent = Agent(
-    model=OpenAIChat(model="gpt-4o"),
-    knowledge=PDFUrlKnowledgeBase(
-        urls=["https://phi-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf"],
-        vector_db=vector_db,
-    ),
-    enable_rag=True,
-    # search_knowledge=True,
-    show_tool_calls=True,
-    markdown=True,
-    debug_mode=True,
-)
-# knowledge_base.load(recreate=True)  # Comment out after first run
-# knowledge_base.load(recreate=True, upsert=True)  # Comment out after first run
-# knowledge_base.load(upsert=True)  # Comment out after first run
-
-# results = vector_db.vector_search("Gluai Buat Chi")
-# print("Vector search results:")
-# pprint([r.id for r in results])
-#
-# results = vector_db.keyword_search("Gluai Buat Chi")
-# print("Keyword search results:")
-# pprint([r.id for r in results])
-#
-# results = vector_db.hybrid_search("Gluai Buat Chi")
-# print("Hybrid search results:")
-# pprint([r.id for r in results])
-
-agent.print_response("How do i make Chicken and Galangal in Coconut Milk Soup")
-
-# run1: RunResponse = agent.run("How to make Gluai Buat Chi?")  # type: ignore
-# pprint(run1)
diff --git a/cookbook/agents/rag.py b/cookbook/agents/rag.py
new file mode 100644
index 0000000000..732857c52e
--- /dev/null
+++ b/cookbook/agents/rag.py
@@ -0,0 +1,28 @@
+from rich.pretty import pprint  # noqa
+from phi.agent import Agent, RunResponse  # noqa
+from phi.model.openai import OpenAIChat
+from phi.knowledge.pdf import PDFUrlKnowledgeBase
+from phi.vectordb.pgvector import PgVector, SearchType
+
+db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
+vector_db = PgVector(table_name="recipes", db_url=db_url, search_type=SearchType.hybrid)
+knowledge_base = PDFUrlKnowledgeBase(
+    urls=["https://phi-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf"],
+    vector_db=vector_db,
+)
+# Uncomment the next line to load the knowledge base (only needed on the first run)
+# knowledge_base.load(upsert=True)
+
+agent = Agent(
+    model=OpenAIChat(id="gpt-4o"),
+    # Reuse the knowledge_base defined above rather than constructing a
+    # second, identical PDFUrlKnowledgeBase over the same vector_db
+    # (the inline duplicate was dead weight and risked the two drifting apart).
+    knowledge=knowledge_base,
+    enable_rag=True,
+    # search_knowledge=True,
+    show_tool_calls=True,
+    markdown=True,
+    # debug_mode=True,
+)
+agent.print_response("How do I make Chicken and Galangal in Coconut Milk Soup")
diff --git a/cookbook/agents/requirements.in b/cookbook/agents/requirements.in
deleted file mode 100644
index f36a78fe1a..0000000000
--- a/cookbook/agents/requirements.in
+++ /dev/null
@@ -1,15 +0,0 @@
-bs4
-duckduckgo-search
-exa_py
-nest_asyncio
-openai
-pgvector
-phidata
-psycopg[binary]
-pypdf
-sqlalchemy
-streamlit
-yfinance
-duckdb
-pandas
-matplotlib
diff --git a/cookbook/agents/requirements.txt b/cookbook/agents/requirements.txt
deleted file mode 100644
index 2beea44642..0000000000
--- a/cookbook/agents/requirements.txt
+++ /dev/null
@@ -1,255 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.11
-# by the following command:
-#
-#    pip-compile cookbook/llm_os/requirements.in
-#
-altair==5.3.0
-    # via streamlit
-annotated-types==0.6.0
-    # via pydantic
-anyio==4.3.0
-    # via
-    #   httpx
-    #   openai
-appdirs==1.4.4
-    # via yfinance
-attrs==23.2.0
-    # via
-    #   jsonschema
-    #   referencing
-beautifulsoup4==4.12.3
-    # via
-    #   bs4
-    #   yfinance
-blinker==1.8.2
-    # via streamlit
-bs4==0.0.2
-    # via -r cookbook/llm_os/requirements.in
-cachetools==5.3.3
-    # via streamlit
-certifi==2024.2.2
-    # via
-    #   curl-cffi
-    #   httpcore
-    #   httpx
-    #   requests
-cffi==1.16.0
-    # via curl-cffi
-charset-normalizer==3.3.2
-    # via requests
-click==8.1.7
-    # via
-    #   duckduckgo-search
-    #   streamlit
-    #   typer
-contourpy==1.2.1
-    # via matplotlib
-curl-cffi==0.7.0b4
-    # via duckduckgo-search
-cycler==0.12.1
-    # via matplotlib
-distro==1.9.0
-    # via openai
-duckdb==0.10.2
-    # via -r cookbook/llm_os/requirements.in
-duckduckgo-search==5.3.1
-    # via -r cookbook/llm_os/requirements.in
-exa-py==1.0.9
-    # via -r cookbook/llm_os/requirements.in
-fonttools==4.51.0
-    # via matplotlib
-frozendict==2.4.4
-    # via yfinance
-gitdb==4.0.11
-    # via gitpython
-gitpython==3.1.43
-    # via
-    #   phidata
-    #   streamlit
-h11==0.14.0
-    # via httpcore
-html5lib==1.1
-    # via yfinance
-httpcore==1.0.5
-    # via httpx
-httpx==0.27.0
-    # via
-    #   openai
-    #   phidata
-idna==3.7
-    # via
-    #   anyio
-    #   httpx
-    #   requests
-jinja2==3.1.4
-    # via
-    #   altair
-    #   pydeck
-jsonschema==4.22.0
-    # via altair
-jsonschema-specifications==2023.12.1
-    # via jsonschema
-kiwisolver==1.4.5
-    # via matplotlib
-lxml==5.2.1
-    # via yfinance
-markdown-it-py==3.0.0
-    # via rich
-markupsafe==2.1.5
-    # via jinja2
-matplotlib==3.8.4
-    # via -r cookbook/llm_os/requirements.in
-mdurl==0.1.2
-    # via markdown-it-py
-multitasking==0.0.11
-    # via yfinance
-nest-asyncio==1.6.0
-    # via -r cookbook/llm_os/requirements.in
-numpy==1.26.4
-    # via
-    #   altair
-    #   contourpy
-    #   matplotlib
-    #   pandas
-    #   pgvector
-    #   pyarrow
-    #   pydeck
-    #   streamlit
-    #   yfinance
-openai==1.28.1
-    # via -r cookbook/llm_os/requirements.in
-orjson==3.10.3
-    # via duckduckgo-search
-packaging==24.0
-    # via
-    #   altair
-    #   matplotlib
-    #   streamlit
-pandas==2.2.2
-    # via
-    #   -r cookbook/llm_os/requirements.in
-    #   altair
-    #   streamlit
-    #   yfinance
-peewee==3.17.5
-    # via yfinance
-pgvector==0.2.5
-    # via -r cookbook/llm_os/requirements.in
-phidata==2.4.20
-    # via -r cookbook/llm_os/requirements.in
-pillow==10.3.0
-    # via
-    #   matplotlib
-    #   streamlit
-protobuf==4.25.3
-    # via streamlit
-psycopg[binary]==3.1.18
-    # via -r cookbook/llm_os/requirements.in
-psycopg-binary==3.1.18
-    # via psycopg
-pyarrow==16.0.0
-    # via streamlit
-pycparser==2.22
-    # via cffi
-pydantic==2.7.1
-    # via
-    #   openai
-    #   phidata
-    #   pydantic-settings
-pydantic-core==2.18.2
-    # via pydantic
-pydantic-settings==2.2.1
-    # via phidata
-pydeck==0.9.1
-    # via streamlit
-pygments==2.18.0
-    # via rich
-pyparsing==3.1.2
-    # via matplotlib
-pypdf==4.2.0
-    # via -r cookbook/llm_os/requirements.in
-python-dateutil==2.9.0.post0
-    # via
-    #   matplotlib
-    #   pandas
-python-dotenv==1.0.1
-    # via
-    #   phidata
-    #   pydantic-settings
-pytz==2024.1
-    # via
-    #   pandas
-    #   yfinance
-pyyaml==6.0.1
-    # via phidata
-referencing==0.35.1
-    # via
-    #   jsonschema
-    #   jsonschema-specifications
-requests==2.31.0
-    # via
-    #   exa-py
-    #   streamlit
-    #   yfinance
-rich==13.7.1
-    # via
-    #   phidata
-    #   streamlit
-    #   typer
-rpds-py==0.18.1
-    # via
-    #   jsonschema
-    #   referencing
-shellingham==1.5.4
-    # via typer
-six==1.16.0
-    # via
-    #   html5lib
-    #   python-dateutil
-smmap==5.0.1
-    # via gitdb
-sniffio==1.3.1
-    # via
-    #   anyio
-    #   httpx
-    #   openai
-soupsieve==2.5
-    # via beautifulsoup4
-sqlalchemy==2.0.30
-    # via -r cookbook/llm_os/requirements.in
-streamlit==1.34.0
-    # via -r cookbook/llm_os/requirements.in
-tenacity==8.3.0
-    # via streamlit
-toml==0.10.2
-    # via streamlit
-tomli==2.0.1
-    # via phidata
-toolz==0.12.1
-    # via altair
-tornado==6.4
-    # via streamlit
-tqdm==4.66.4
-    # via openai
-typer==0.12.3
-    # via phidata
-typing-extensions==4.11.0
-    # via
-    #   exa-py
-    #   openai
-    #   phidata
-    #   psycopg
-    #   pydantic
-    #   pydantic-core
-    #   sqlalchemy
-    #   streamlit
-    #   typer
-tzdata==2024.1
-    # via pandas
-urllib3==1.26.18
-    # via requests
-webencodings==0.5.1
-    # via html5lib
-yfinance==0.2.38
-    # via -r cookbook/llm_os/requirements.in
diff --git a/cookbook/agents/web_search.py b/cookbook/agents/web_search.py
index c7712892b2..82f3adcc7f 100644
--- a/cookbook/agents/web_search.py
+++ b/cookbook/agents/web_search.py
@@ -1,6 +1,6 @@
-from phi.assistant import Assistant
-from phi.llm.openai import OpenAIChat
+from phi.agent import Agent
+from phi.model.openai import OpenAIChat
 from phi.tools.duckduckgo import DuckDuckGo
 
-assistant = Assistant(llm=OpenAIChat(model="gpt-4o"), tools=[DuckDuckGo()], show_tool_calls=True)
-assistant.print_response("Share 3 news stories from France", markdown=True)
+web_search_agent = Agent(model=OpenAIChat(id="gpt-4o"), tools=[DuckDuckGo()], show_tool_calls=True, markdown=True)
+web_search_agent.print_response("Share 3 news stories from France")
diff --git a/cookbook/providers/openai/agent.py b/cookbook/providers/openai/agent.py
index 94ab2c2375..63822d1c92 100644
--- a/cookbook/providers/openai/agent.py
+++ b/cookbook/providers/openai/agent.py
@@ -13,5 +13,5 @@
 # run: RunResponse = agent.run("What is the stock price of NVDA and TSLA")
 # print(run.content)
 
-# Print the response on the terminal
+# Print the response in the terminal
 agent.print_response("What is the stock price of NVDA and TSLA")
diff --git a/cookbook/providers/openai/agent_stream.py b/cookbook/providers/openai/agent_stream.py
index 95fec0e9b5..6e0154ce35 100644
--- a/cookbook/providers/openai/agent_stream.py
+++ b/cookbook/providers/openai/agent_stream.py
@@ -15,5 +15,5 @@
 # for chunk in run_response:
 #     print(chunk.content)
 
-# Print the response on the terminal
+# Print the response in the terminal
 agent.print_response("What is the stock price of NVDA and TSLA", stream=True)
diff --git a/cookbook/providers/openai/basic.py b/cookbook/providers/openai/basic.py
index 79e7c20dd2..01d1ace843 100644
--- a/cookbook/providers/openai/basic.py
+++ b/cookbook/providers/openai/basic.py
@@ -7,5 +7,5 @@
 # run: RunResponse = agent.run("Explain simulation theory")
 # print(run.content)
 
-# Print the response on the terminal
+# Print the response in the terminal
 agent.print_response("Explain simulation theory")
diff --git a/cookbook/providers/openai/basic_stream.py b/cookbook/providers/openai/basic_stream.py
index 821eedd45d..d865e98ea1 100644
--- a/cookbook/providers/openai/basic_stream.py
+++ b/cookbook/providers/openai/basic_stream.py
@@ -9,5 +9,5 @@
 # for chunk in run_response:
 #     print(chunk.content)
 
-# Print the response on the terminal
+# Print the response in the terminal
 agent.print_response("Explain simulation theory", stream=True)
diff --git a/phi/tools/yfinance.py b/phi/tools/yfinance.py
index 118b6bdb90..b46cd3e986 100644
--- a/phi/tools/yfinance.py
+++ b/phi/tools/yfinance.py
@@ -20,26 +20,27 @@ def __init__(
         company_news: bool = False,
         technical_indicators: bool = False,
         historical_prices: bool = False,
+        enable_all_tools: bool = False,
     ):
         super().__init__(name="yfinance_tools")
 
-        if stock_price:
+        if stock_price or enable_all_tools:
             self.register(self.get_current_stock_price)
-        if company_info:
+        if company_info or enable_all_tools:
             self.register(self.get_company_info)
-        if stock_fundamentals:
+        if stock_fundamentals or enable_all_tools:
             self.register(self.get_stock_fundamentals)
-        if income_statements:
+        if income_statements or enable_all_tools:
             self.register(self.get_income_statements)
-        if key_financial_ratios:
+        if key_financial_ratios or enable_all_tools:
             self.register(self.get_key_financial_ratios)
-        if analyst_recommendations:
+        if analyst_recommendations or enable_all_tools:
             self.register(self.get_analyst_recommendations)
-        if company_news:
+        if company_news or enable_all_tools:
             self.register(self.get_company_news)
-        if technical_indicators:
+        if technical_indicators or enable_all_tools:
             self.register(self.get_technical_indicators)
-        if historical_prices:
+        if historical_prices or enable_all_tools:
             self.register(self.get_historical_stock_prices)
 
     def get_current_stock_price(self, symbol: str) -> str: