diff --git a/babyagi_task_finalizer/.env.example b/babyagi_task_finalizer/.env.example
new file mode 100644
index 0000000..248c0f8
--- /dev/null
+++ b/babyagi_task_finalizer/.env.example
@@ -0,0 +1,2 @@
+OPENAI_API_KEY=
+NODE_URL=http://localhost:7001
\ No newline at end of file
diff --git a/babyagi_task_finalizer/component.yaml b/babyagi_task_finalizer/component.yaml
deleted file mode 100644
index 2530b24..0000000
--- a/babyagi_task_finalizer/component.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-name: "babyagi_task_finalizer"
-author: "naptha"
-version: "0.1.0"
-description: "A tool in naptha's babyagi."
-license: "MIT"
-
-models:
-  default_model_provider: "ollama"
-  ollama:
-    model: "ollama/gemma"
-    max_tokens: 1000
-    temperature: 0
-    api_base: "http://localhost:11434"
-  openai:
-    model: "openai/gpt-3.5-turbo"
-    max_tokens: 1000
-    temperature: 0
-
-inputs:
-  system_message: "You are a helpful AI assistant."
-  user_message_template: |
-    You are given the following objective: {{objective}}.
-    Your colleagues have accomplished the following tasks with the following results: {{tasks}}.
-
-
-    1. Your task is to study the results of the tasks and prepare a final report.
-    2. The final report should be very detailed.
-    3. The report should encompass all the tasks that have been performed.
-    4. The report should be in MARKDOWN format.
-    5. Only prepare the final report if the objective have been met.
-    6. If the objective have not been met, prepare the new tasks that need to be performed.
-
-
-    Final Report:
-  save: false
-  location: "ipfs"
-
-outputs:
-  save: false
-  location: "node"
-
-implementation:
-  package:
-    entrypoint: "run.py"
diff --git a/babyagi_task_finalizer/config/agent_deployments.json b/babyagi_task_finalizer/config/agent_deployments.json
new file mode 100644
index 0000000..6f30d89
--- /dev/null
+++ b/babyagi_task_finalizer/config/agent_deployments.json
@@ -0,0 +1,17 @@
+[
+    {
+        "name": "agent_deployment_1",
+        "module": {"name": "babyagi_task_finalizer"},
+        "worker_node_url": "http://localhost:7001",
+        "agent_config": {
+            "config_name": "TaskFinalizerAgentConfig",
+            "llm_config": {"config_name": "model_2"},
+            "persona_module": null,
+            "system_prompt": {
+                "role": "You are a helpful AI assistant.",
+                "persona": ""
+            },
+            "user_message_template": "You are given the following task: {{task}}. The task is to accomplish the following objective: {{objective}}."
+        }
+    }
+]
\ No newline at end of file
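Note that `user_message_template` relies on literal `{{task}}` and `{{objective}}` placeholders that `run.py` fills with plain `str.replace`, not a templating engine. A minimal sketch of that substitution, using the illustrative values from the module's `__main__` block:

```python
# Fill the deployment's template the same way execute_task does in run.py.
template = (
    "You are given the following task: {{task}}. "
    "The task is to accomplish the following objective: {{objective}}."
)
user_prompt = template.replace(
    "{{task}}", "Weather pattern between year 1900 and 2000"
).replace(
    "{{objective}}", "Write a blog post about the weather in London."
)
print(user_prompt)
```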
+ } + } +] \ No newline at end of file diff --git a/babyagi_task_finalizer/config/llm_configs.json b/babyagi_task_finalizer/config/llm_configs.json new file mode 100644 index 0000000..b8a6745 --- /dev/null +++ b/babyagi_task_finalizer/config/llm_configs.json @@ -0,0 +1,18 @@ +[ + { + "config_name": "model_1", + "client": "ollama", + "model": "ollama/phi", + "temperature": 0.7, + "max_tokens": 1000, + "api_base": "http://localhost:11434" + }, + { + "config_name": "model_2", + "client": "openai", + "model": "gpt-4o-mini", + "temperature": 0.7, + "max_tokens": 1000, + "api_base": "https://api.openai.com/v1" + } +] \ No newline at end of file diff --git a/babyagi_task_finalizer/run.py b/babyagi_task_finalizer/run.py index 6e4980a..9d5c55b 100644 --- a/babyagi_task_finalizer/run.py +++ b/babyagi_task_finalizer/run.py @@ -1,73 +1,97 @@ +#!/usr/bin/env python +from dotenv import load_dotenv +from babyagi_task_finalizer.schemas import InputSchema, TaskExecutorPromptSchema, TaskFinalizerAgentConfig, TaskFinalizer +import json +from litellm import completion import os -import yaml -import instructor -from litellm import Router -from babyagi_task_finalizer.schemas import InputSchema, TaskFinalizer -from babyagi_task_finalizer.utils import get_logger - +from naptha_sdk.schemas import AgentDeployment, AgentRunInput +from naptha_sdk.utils import get_logger +load_dotenv() logger = get_logger(__name__) -client = instructor.patch( - Router( - model_list= - [ - { - "model_name": "gpt-3.5-turbo", - "litellm_params": { - "model": "openai/gpt-3.5-turbo", - "api_key": os.getenv("OPENAI_API_KEY"), - }, - } - ], - # default_litellm_params={"acompletion": True}, - ) -) - -def llm_call(messages, response_model=None): - if response_model: - response = client.chat.completions.create( - model="gpt-3.5-turbo", - response_model=response_model, +class TaskFinalizerAgent: + def __init__(self, agent_deployment: AgentDeployment): + self.agent_deployment = agent_deployment + + def execute_task(self, inputs: InputSchema): + if isinstance(self.agent_deployment.agent_config, dict): + self.agent_deployment.agent_config = TaskFinalizerAgentConfig(**self.agent_deployment.agent_config) + + user_prompt = self.agent_deployment.agent_config.user_message_template.replace( + "{{task}}", inputs.tool_input_data.task + ).replace( + "{{objective}}", inputs.tool_input_data.objective + ) + + messages = [ + {"role": "system", "content": json.dumps(self.agent_deployment.agent_config.system_prompt)}, + {"role": "user", "content": user_prompt} + ] + + api_key = None if self.agent_deployment.agent_config.llm_config.client == "ollama" else ( + "EMPTY" if self.agent_deployment.agent_config.llm_config.client == "vllm" else os.getenv("OPENAI_API_KEY") + ) + + response = completion( + model=self.agent_deployment.agent_config.llm_config.model, messages=messages, - temperature=0.0, - max_tokens=1000, + temperature=self.agent_deployment.agent_config.llm_config.temperature, + max_tokens=self.agent_deployment.agent_config.llm_config.max_tokens, + api_base=self.agent_deployment.agent_config.llm_config.api_base, + api_key=api_key ) - return response - -def run(inputs: InputSchema, *args, **kwargs): - logger.info(f"Running with inputs {inputs.objective}") - logger.info(f"Running with inputs {inputs.task}") - cfg = kwargs["cfg"] - - user_prompt = cfg["inputs"]["user_message_template"].replace("{{task}}", inputs.task).replace("{{objective}}", inputs.objective) - - messages = [ - {"role": "system", "content": cfg["inputs"]["system_message"]}, - {"role": "user", 
"content": user_prompt} - ] - - response = llm_call(messages, response_model=TaskFinalizer) - - logger.info(f"Result: {response}") - - return response.model_dump_json() - + + # Parse the response into the TaskFinalizer model + response_content = response.choices[0].message.content + + try: + # Attempt to parse the response as JSON + parsed_response = json.loads(response_content) + task_finalizer = TaskFinalizer(**parsed_response) + except (json.JSONDecodeError, TypeError): + # If parsing fails, create a TaskFinalizer with the raw content + task_finalizer = TaskFinalizer( + final_report=response_content, + new_tasks=[], + objective_met=False + ) + + logger.info(f"Response: {task_finalizer}") + return task_finalizer.model_dump_json() + +def run(agent_run: AgentRunInput, *args, **kwargs): + logger.info(f"Running with inputs {agent_run.inputs.tool_input_data}") + task_finalizer_agent = TaskFinalizerAgent(agent_run.agent_deployment) + method = getattr(task_finalizer_agent, agent_run.inputs.tool_name, None) + return method(agent_run.inputs) if __name__ == "__main__": - with open("babyagi_task_finalizer/component.yaml", "r") as f: - cfg = yaml.safe_load(f) - - inputs = InputSchema( - task="Weather pattern between year 1900 and 2000 was cool andry", - objective="Write a blog post about the weather in London." + from naptha_sdk.client.naptha import Naptha + from naptha_sdk.configs import load_agent_deployments + + naptha = Naptha() + + # Configs + agent_deployments = load_agent_deployments( + "babyagi_task_finalizer/configs/agent_deployments.json", + load_persona_data=False, + load_persona_schema=False ) - - r = run(inputs, cfg=cfg) - logger.info(f"Result: {type(r)}") - - - import json - t = TaskFinalizer(**json.loads(r)) - logger.info(f"Final report: {type(t)}") - + + input_params = InputSchema( + tool_name="execute_task", + tool_input_data=TaskExecutorPromptSchema( + task="Weather pattern between year 1900 and 2000", + objective="Write a blog post about the weather in London." + ), + ) + + agent_run = AgentRunInput( + inputs=input_params, + agent_deployment=agent_deployments[0], + consumer_id=naptha.user.id, + ) + + response = run(agent_run) + print(response) \ No newline at end of file diff --git a/babyagi_task_finalizer/schemas.py b/babyagi_task_finalizer/schemas.py index 05bc6c0..69cd637 100644 --- a/babyagi_task_finalizer/schemas.py +++ b/babyagi_task_finalizer/schemas.py @@ -1,10 +1,15 @@ +from naptha_sdk.schemas import AgentConfig from pydantic import BaseModel, Field from typing import List -class InputSchema(BaseModel): +class TaskExecutorPromptSchema(BaseModel): task: str objective: str +class InputSchema(BaseModel): + tool_name: str + tool_input_data: TaskExecutorPromptSchema + class Task(BaseModel): """Class for defining a task to be performed.""" name: str = Field(..., description="The name of the task to be performed.") @@ -16,4 +21,7 @@ class TaskFinalizer(BaseModel): """Class for finalizing the tasks.""" final_report: str = Field("", description="The final report of the tasks.") new_tasks: List[Task] = Field([], description="A list of new tasks to be performed.") - objective_met: bool = Field(False, description="The status of the objective. True if the objective have been met, False otherwise.") \ No newline at end of file + objective_met: bool = Field(False, description="The status of the objective. 
diff --git a/babyagi_task_finalizer/schemas.py b/babyagi_task_finalizer/schemas.py
index 05bc6c0..69cd637 100644
--- a/babyagi_task_finalizer/schemas.py
+++ b/babyagi_task_finalizer/schemas.py
@@ -1,10 +1,15 @@
+from naptha_sdk.schemas import AgentConfig
 from pydantic import BaseModel, Field
 from typing import List
 
-class InputSchema(BaseModel):
+class TaskExecutorPromptSchema(BaseModel):
     task: str
     objective: str
 
+class InputSchema(BaseModel):
+    tool_name: str
+    tool_input_data: TaskExecutorPromptSchema
+
 class Task(BaseModel):
     """Class for defining a task to be performed."""
     name: str = Field(..., description="The name of the task to be performed.")
@@ -16,4 +21,7 @@ class TaskFinalizer(BaseModel):
     """Class for finalizing the tasks."""
     final_report: str = Field("", description="The final report of the tasks.")
     new_tasks: List[Task] = Field([], description="A list of new tasks to be performed.")
-    objective_met: bool = Field(False, description="The status of the objective. True if the objective have been met, False otherwise.")
\ No newline at end of file
+    objective_met: bool = Field(False, description="The status of the objective. True if the objective has been met, False otherwise.")
+
+class TaskFinalizerAgentConfig(AgentConfig):
+    user_message_template: str
\ No newline at end of file
diff --git a/babyagi_task_finalizer/utils.py b/babyagi_task_finalizer/utils.py
deleted file mode 100644
index a25fa26..0000000
--- a/babyagi_task_finalizer/utils.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import logging
-
-
-def get_logger(name=__name__):
-    # create logger with formatter and handler
-    logger = logging.getLogger(name)
-    logger.setLevel(logging.DEBUG)
-    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
-    handler = logging.StreamHandler()
-    handler.setFormatter(formatter)
-    logger.addHandler(handler)
-    return logger
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index bc9cf30..c9386fd 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -9,10 +9,12 @@ packages = [
 ]
 
 [tool.poetry.dependencies]
-python = "^3.10"
+python = ">=3.10,<=3.13"
 pydantic = "^2.7.0"
 litellm = "^1.35.15"
-instructor = "^1.3.2"
+naptha-sdk = {git = "https://github.com/NapthaAI/naptha-sdk.git"}
+python-dotenv = "^1.0.1"
+instructor = "^1.3.2"
 
 [build-system]
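With `instructor`'s router gone, every provider now goes through litellm's `completion`, and only the `api_key` convention differs (`None` for Ollama, the literal `"EMPTY"` for vLLM, `OPENAI_API_KEY` for OpenAI). As a standalone sanity check, the same call shape wired to the `model_1` settings from `llm_configs.json` (assumes an Ollama server running on localhost):

```python
from litellm import completion

# Same call shape as execute_task, using the local model_1 config.
response = completion(
    model="ollama/phi",
    messages=[
        {"role": "system", "content": "You are a helpful AI assistant."},
        {"role": "user", "content": "Summarize the completed tasks in one line."},
    ],
    temperature=0.7,
    max_tokens=1000,
    api_base="http://localhost:11434",
    api_key=None,  # Ollama needs no key; vLLM would take "EMPTY", OpenAI a real key.
)
print(response.choices[0].message.content)
```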