diff --git a/playground/agents/meta_prompter.py b/playground/agents/meta_prompter.py
index b6eec5fa1..c517acb1a 100644
--- a/playground/agents/meta_prompter.py
+++ b/playground/agents/meta_prompter.py
@@ -2,23 +2,23 @@
 from swarms.agents.meta_prompter import MetaPrompterAgent
 from langchain.llms import OpenAI
 
-#init llm
+# init llm
 llm = OpenAI()
 
-#init the meta prompter agent that optimized prompts
+# init the meta prompter agent that optimized prompts
 meta_optimizer = MetaPrompterAgent(llm=llm)
 
-#init the worker agent
+# init the worker agent
 worker = Worker(llm)
 
-#broad task to complete
+# broad task to complete
 task = "Create a feedforward in pytorch"
 
-#optimize the prompt
-optimized_prompt = meta_optimizer.run(task)
+# optimize the prompt
+optimized_prompt = meta_optimizer.run(task)
 
-#run the optimized prompt with detailed instructions
+# run the optimized prompt with detailed instructions
 result = worker.run(optimized_prompt)
 
-#print
-print(result)
\ No newline at end of file
+# print
+print(result)
diff --git a/swarms/agents/conversabe_agent.py b/swarms/agents/conversabe_agent.py
index 1ef2f6470..35808c4bc 100644
--- a/swarms/agents/conversabe_agent.py
+++ b/swarms/agents/conversabe_agent.py
@@ -977,7 +977,7 @@ def execute_code_blocks(self, code_blocks):
                 )
             elif lang in ["python", "Python"]:
                 if code.startswith("# filename: "):
-                    filename = code[11: code.find("\n")].strip()
+                    filename = code[11 : code.find("\n")].strip()
                 else:
                     filename = None
                 exitcode, logs, image = self.run_code(
diff --git a/swarms/agents/message.py b/swarms/agents/message.py
index 823405e34..20a90fe57 100644
--- a/swarms/agents/message.py
+++ b/swarms/agents/message.py
@@ -23,6 +23,6 @@ def __init__(self, sender, content, metadata=None):
 
     def __repr__(self):
         """
-        __repr__ means... 
+        __repr__ means...
         """
         return f"{self.timestamp} - {self.sender}: {self.content}"
diff --git a/swarms/agents/models/groundingdino/datasets/transforms.py b/swarms/agents/models/groundingdino/datasets/transforms.py
index 5d6d2cfd7..c34a14531 100644
--- a/swarms/agents/models/groundingdino/datasets/transforms.py
+++ b/swarms/agents/models/groundingdino/datasets/transforms.py
@@ -38,7 +38,7 @@ def crop(image, target, region):
 
     if "masks" in target:
         # FIXME should we update the area here if there are no boxes?
-        target["masks"] = target["masks"][:, i: i + h, j: j + w]
+        target["masks"] = target["masks"][:, i : i + h, j : j + w]
         fields.append("masks")
 
     # remove elements for which the boxes or masks that have zero area
diff --git a/swarms/agents/models/groundingdino/models/GroundingDINO/backbone/backbone.py b/swarms/agents/models/groundingdino/models/GroundingDINO/backbone/backbone.py
index 91e74de41..a56f369ea 100644
--- a/swarms/agents/models/groundingdino/models/GroundingDINO/backbone/backbone.py
+++ b/swarms/agents/models/groundingdino/models/GroundingDINO/backbone/backbone.py
@@ -159,7 +159,7 @@ def __init__(
         ), "Only resnet50 and resnet101 are available."
         assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
         num_channels_all = [256, 512, 1024, 2048]
-        num_channels = num_channels_all[4 - len(return_interm_indices):]
+        num_channels = num_channels_all[4 - len(return_interm_indices) :]
         super().__init__(backbone, train_backbone, num_channels, return_interm_indices)
 
 
@@ -224,7 +224,7 @@ def build_backbone(args):
             use_checkpoint=use_checkpoint,
         )
 
-        bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]
+        bb_num_channels = backbone.num_features[4 - len(return_interm_indices) :]
     else:
         raise NotImplementedError("Unknown backbone {}".format(args.backbone))
 
diff --git a/swarms/agents/models/groundingdino/models/GroundingDINO/backbone/swin_transformer.py b/swarms/agents/models/groundingdino/models/GroundingDINO/backbone/swin_transformer.py
index b476627ef..1a74ca36a 100644
--- a/swarms/agents/models/groundingdino/models/GroundingDINO/backbone/swin_transformer.py
+++ b/swarms/agents/models/groundingdino/models/GroundingDINO/backbone/swin_transformer.py
@@ -649,7 +649,7 @@ def __init__(
                 qk_scale=qk_scale,
                 drop=drop_rate,
                 attn_drop=attn_drop_rate,
-                drop_path=dpr[sum(depths[:i_layer]): sum(depths[: i_layer + 1])],
+                drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
                 norm_layer=norm_layer,
                 # downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                 downsample=downsamplelist[i_layer],
diff --git a/swarms/agents/models/groundingdino/models/GroundingDINO/bertwarper.py b/swarms/agents/models/groundingdino/models/GroundingDINO/bertwarper.py
index 7a46aa703..2ad9c0208 100644
--- a/swarms/agents/models/groundingdino/models/GroundingDINO/bertwarper.py
+++ b/swarms/agents/models/groundingdino/models/GroundingDINO/bertwarper.py
@@ -221,9 +221,9 @@ def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer
             position_ids[row, col] = 0
         else:
             attention_mask[
-                row, previous_col + 1: col + 1, previous_col + 1: col + 1
+                row, previous_col + 1 : col + 1, previous_col + 1 : col + 1
             ] = True
-            position_ids[row, previous_col + 1: col + 1] = torch.arange(
+            position_ids[row, previous_col + 1 : col + 1] = torch.arange(
                 0, col - previous_col, device=input_ids.device
             )
 
@@ -273,13 +273,13 @@ def generate_masks_with_special_tokens_and_transfer_map(
             position_ids[row, col] = 0
         else:
             attention_mask[
-                row, previous_col + 1: col + 1, previous_col + 1: col + 1
+                row, previous_col + 1 : col + 1, previous_col + 1 : col + 1
             ] = True
-            position_ids[row, previous_col + 1: col + 1] = torch.arange(
+            position_ids[row, previous_col + 1 : col + 1] = torch.arange(
                 0, col - previous_col, device=input_ids.device
             )
             c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()
-            c2t_maski[previous_col + 1: col] = True
+            c2t_maski[previous_col + 1 : col] = True
             cate_to_token_mask_list[row].append(c2t_maski)
 
         previous_col = col
diff --git a/swarms/agents/models/groundingdino/models/GroundingDINO/utils.py b/swarms/agents/models/groundingdino/models/GroundingDINO/utils.py
index 9488f8272..2bb3e9b8b 100644
--- a/swarms/agents/models/groundingdino/models/GroundingDINO/utils.py
+++ b/swarms/agents/models/groundingdino/models/GroundingDINO/utils.py
@@ -76,7 +76,7 @@ def gen_encoder_output_proposals(
     proposals = []
    _cur = 0
     for lvl, (H_, W_) in enumerate(spatial_shapes):
-        mask_flatten_ = memory_padding_mask[:, _cur: (_cur + H_ * W_)].view(
+        mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(
             N_, H_, W_, 1
         )
         valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
diff --git a/swarms/agents/models/groundingdino/util/utils.py b/swarms/agents/models/groundingdino/util/utils.py
index 90af343dc..7a0815ef0 100644
--- a/swarms/agents/models/groundingdino/util/utils.py
+++ b/swarms/agents/models/groundingdino/util/utils.py
@@ -619,7 +619,7 @@ def get_phrases_from_posmap(
 ):
     assert isinstance(posmap, torch.Tensor), "posmap must be torch.Tensor"
     if posmap.dim() == 1:
-        posmap[0: left_idx + 1] = False
+        posmap[0 : left_idx + 1] = False
         posmap[right_idx:] = False
         non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()
         token_ids = [tokenized["input_ids"][i] for i in non_zero_idx]
diff --git a/swarms/agents/models/groundingdino/util/vl_utils.py b/swarms/agents/models/groundingdino/util/vl_utils.py
index 44ff4d5e4..4fd8592c9 100644
--- a/swarms/agents/models/groundingdino/util/vl_utils.py
+++ b/swarms/agents/models/groundingdino/util/vl_utils.py
@@ -41,7 +41,7 @@ def create_positive_map_from_span(tokenized, token_span, max_text_len=256):
                 positive_map[j, beg_pos] = 1
                 break
             else:
-                positive_map[j, beg_pos: end_pos + 1].fill_(1)
+                positive_map[j, beg_pos : end_pos + 1].fill_(1)
 
     return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)
 
diff --git a/swarms/agents/models/segment_anything/segment_anything/modeling/mask_decoder.py b/swarms/agents/models/segment_anything/segment_anything/modeling/mask_decoder.py
index 35170835c..f94bee1fa 100644
--- a/swarms/agents/models/segment_anything/segment_anything/modeling/mask_decoder.py
+++ b/swarms/agents/models/segment_anything/segment_anything/modeling/mask_decoder.py
@@ -139,7 +139,7 @@ def predict_masks(
         # Run the transformer
         hs, src = self.transformer(src, pos_src, tokens)
         iou_token_out = hs[:, 0, :]
-        mask_tokens_out = hs[:, 1: (1 + self.num_mask_tokens), :]
+        mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
 
         # Upscale mask embeddings and predict masks using the mask tokens
         src = src.transpose(1, 2).view(b, c, h, w)
diff --git a/swarms/agents/models/segment_anything/segment_anything/utils/amg.py b/swarms/agents/models/segment_anything/segment_anything/utils/amg.py
index cb67232aa..be064071e 100644
--- a/swarms/agents/models/segment_anything/segment_anything/utils/amg.py
+++ b/swarms/agents/models/segment_anything/segment_anything/utils/amg.py
@@ -101,7 +101,7 @@ def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
     ), "Batched iteration must have inputs of all the same size."
     n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
     for b in range(n_batches):
-        yield [arg[b * batch_size: (b + 1) * batch_size] for arg in args]
+        yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
 
 
 def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
@@ -142,7 +142,7 @@ def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
     idx = 0
     parity = False
     for count in rle["counts"]:
-        mask[idx: idx + count] = parity
+        mask[idx : idx + count] = parity
         idx += count
         parity ^= True
     mask = mask.reshape(w, h)
diff --git a/swarms/agents/multi_modal_visual_agent.py b/swarms/agents/multi_modal_visual_agent.py
index b0172431a..68941ef05 100644
--- a/swarms/agents/multi_modal_visual_agent.py
+++ b/swarms/agents/multi_modal_visual_agent.py
@@ -207,12 +207,12 @@ def blend_gt2pt(old_image, new_image, sigma=0.15, steps=100):
     kernel[steps:-steps, :steps] = left
     kernel[steps:-steps, -steps:] = right
 
-    pt_gt_img = easy_img[pos_h: pos_h + old_size[1], pos_w: pos_w + old_size[0]]
+    pt_gt_img = easy_img[pos_h : pos_h + old_size[1], pos_w : pos_w + old_size[0]]
     gaussian_gt_img = (
         kernel * gt_img_array + (1 - kernel) * pt_gt_img
     )  # gt img with blur img
     gaussian_gt_img = gaussian_gt_img.astype(np.int64)
-    easy_img[pos_h: pos_h + old_size[1], pos_w: pos_w + old_size[0]] = gaussian_gt_img
+    easy_img[pos_h : pos_h + old_size[1], pos_w : pos_w + old_size[0]] = gaussian_gt_img
     gaussian_img = Image.fromarray(easy_img)
     return gaussian_img
 
diff --git a/swarms/agents/multi_modal_workers/omni_agent/omni_chat.py b/swarms/agents/multi_modal_workers/omni_agent/omni_chat.py
index 18d875785..2198af259 100644
--- a/swarms/agents/multi_modal_workers/omni_agent/omni_chat.py
+++ b/swarms/agents/multi_modal_workers/omni_agent/omni_chat.py
@@ -317,7 +317,7 @@ def find_json(s):
     s = s.replace("'", '"')
     start = s.find("{")
     end = s.rfind("}")
-    res = s[start: end + 1]
+    res = s[start : end + 1]
     res = res.replace("\n", "")
     return res
 
diff --git a/swarms/embeddings/openai.py b/swarms/embeddings/openai.py
index 2eba8c718..230dade90 100644
--- a/swarms/embeddings/openai.py
+++ b/swarms/embeddings/openai.py
@@ -347,7 +347,7 @@ def _get_len_safe_embeddings(
                 disallowed_special=self.disallowed_special,
             )
             for j in range(0, len(token), self.embedding_ctx_length):
-                tokens.append(token[j: j + self.embedding_ctx_length])
+                tokens.append(token[j : j + self.embedding_ctx_length])
                 indices.append(i)
 
         batched_embeddings: List[List[float]] = []
@@ -366,7 +366,7 @@ def _get_len_safe_embeddings(
         for i in _iter:
             response = embed_with_retry(
                 self,
-                input=tokens[i: i + _chunk_size],
+                input=tokens[i : i + _chunk_size],
                 **self._invocation_params,
             )
             batched_embeddings.extend(r["embedding"] for r in response["data"])
@@ -428,7 +428,7 @@ async def _aget_len_safe_embeddings(
                 disallowed_special=self.disallowed_special,
             )
             for j in range(0, len(token), self.embedding_ctx_length):
-                tokens.append(token[j: j + self.embedding_ctx_length])
+                tokens.append(token[j : j + self.embedding_ctx_length])
                 indices.append(i)
 
         batched_embeddings: List[List[float]] = []
@@ -436,7 +436,7 @@ async def _aget_len_safe_embeddings(
         for i in range(0, len(tokens), _chunk_size):
             response = await async_embed_with_retry(
                 self,
-                input=tokens[i: i + _chunk_size],
+                input=tokens[i : i + _chunk_size],
                 **self._invocation_params,
             )
             batched_embeddings.extend(r["embedding"] for r in response["data"])
diff --git a/swarms/models/chat_openai.py b/swarms/models/chat_openai.py
index 7ffc9136b..380623c36 100644
--- a/swarms/models/chat_openai.py
+++ b/swarms/models/chat_openai.py
@@ -458,7 +458,7 @@ def get_sub_prompts(
             )
             params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
         sub_prompts = [
-            prompts[i: i + self.batch_size]
+            prompts[i : i + self.batch_size]
             for i in range(0, len(prompts), self.batch_size)
         ]
         return sub_prompts
@@ -469,7 +469,7 @@ def create_llm_result(
         """Create the LLMResult from the choices and prompts."""
         generations = []
         for i, _ in enumerate(prompts):
-            sub_choices = choices[i * self.n: (i + 1) * self.n]
+            sub_choices = choices[i * self.n : (i + 1) * self.n]
             generations.append(
                 [
                     Generation(
diff --git a/swarms/structs/link.py b/swarms/structs/link.py
index 6529c8be6..cb1ac2af9 100644
--- a/swarms/structs/link.py
+++ b/swarms/structs/link.py
@@ -50,21 +50,21 @@ def is_lc_serializable(self) -> bool:
 
     prompt: BasePromptTemplate
     """Prompt object to use."""
-    
+
     llm: BaseLanguageModel
     """Language model to call."""
-    
+
     output_key: str = "text"  #: :meta private:
-    
+
     output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser)
     """Output parser to use.
     Defaults to one that takes the most likely string but does not change it
     otherwise."""
-    
+
     return_final_only: bool = True
     """Whether to return only the final parsed result. Defaults to True.
     If false, will return a bunch of extra information about the generation."""
-    
+
     llm_kwargs: dict = Field(default_factory=dict)
 
     class Config:
@@ -349,4 +349,4 @@ def _link_type(self) -> str:
     def from_string(cls, llm: BaseLanguageModel, template: str) -> Link:
         """Create Link from LLM and template."""
         prompt_template = PromptTemplate.from_template(template)
-        return cls(llm=llm, prompt=prompt_template)
\ No newline at end of file
+        return cls(llm=llm, prompt=prompt_template)
diff --git a/swarms/structs/workflow.py b/swarms/structs/workflow.py
index 5a0c35aa0..f6753748c 100644
--- a/swarms/structs/workflow.py
+++ b/swarms/structs/workflow.py
@@ -11,7 +11,7 @@ class Workflow:
     They string together multiple tasks of varying types, and can use Short-Term Memory
     or pass specific arguments downstream.
-    
+
     Usage
     llm = LLM()
     workflow = Workflow(llm)
 
@@ -47,11 +47,13 @@ def execute(self) -> Any:
             return response
 
     def __init__(self, agent, parallel: bool = False):
+        """__init__"""
         self.agent = agent
         self.tasks: List[Workflow.Task] = []
         self.parallel = parallel
 
     def add(self, task: str) -> Task:
+        """Add a task"""
         task = self.Task(task)
 
         if self.last_task():
@@ -62,12 +64,15 @@ def add(self, task: str) -> Task:
         return task
 
     def first_task(self) -> Optional[Task]:
+        """Add first task"""
         return self.tasks[0] if self.tasks else None
 
     def last_task(self) -> Optional[Task]:
+        """Last task"""
         return self.tasks[-1] if self.tasks else None
 
     def run(self, *args) -> Task:
+        """Run tasks"""
         [task.reset() for task in self.tasks]
 
         if self.parallel:
@@ -79,6 +84,7 @@ def run(self, *args) -> Task:
         return self.last_task()
 
     def context(self, task: Task) -> Dict[str, Any]:
+        """Context in tasks"""
         return {
             "parent_output": task.parents[0].output
             if task.parents and task.parents[0].output
@@ -88,6 +94,7 @@ def context(self, task: Task) -> Dict[str, Any]:
         }
 
     def __run_from_task(self, task: Optional[Task]) -> None:
+        """Run from task"""
         if task is None:
             return
         else:
diff --git a/swarms/tools/autogpt.py b/swarms/tools/autogpt.py
index 3d3e66001..1755d2590 100644
--- a/swarms/tools/autogpt.py
+++ b/swarms/tools/autogpt.py
@@ -1,29 +1,30 @@
-import interpreter
-from transformers import (
-    BlipForQuestionAnswering,
-    BlipProcessor,
-)
-from PIL import Image
-import torch
-from swarms.utils.logger import logger
-from pydantic import Field
-from langchain.tools.file_management.write import WriteFileTool
-from langchain.tools.file_management.read import ReadFileTool
-from langchain.tools import BaseTool
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.chains.qa_with_sources.loading import BaseCombineDocumentsChain
 import asyncio
 import os
-
-# Tools
 from contextlib import contextmanager
 from typing import Optional
 
+import interpreter
 import pandas as pd
+import torch
 from langchain.agents import tool
 from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
-from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
+from langchain.chains.qa_with_sources.loading import (
+    BaseCombineDocumentsChain,
+    load_qa_with_sources_chain,
+)
 from langchain.docstore.document import Document
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain.tools import BaseTool
+from langchain.tools.file_management.read import ReadFileTool
+from langchain.tools.file_management.write import WriteFileTool
+from PIL import Image
+from pydantic import Field
+from transformers import (
+    BlipForQuestionAnswering,
+    BlipProcessor,
+)
+
+from swarms.utils.logger import logger
 
 
 ROOT_DIR = "./data/"
@@ -128,7 +129,7 @@ def _run(self, url: str, question: str) -> str:
         results = []
         # TODO: Handle this with a MapReduceChain
         for i in range(0, len(web_docs), 4):
-            input_docs = web_docs[i: i + 4]
+            input_docs = web_docs[i : i + 4]
             window_result = self.qa_chain(
                 {"input_documents": input_docs, "question": question},
                 return_only_outputs=True,
diff --git a/swarms/tools/developer.py b/swarms/tools/developer.py
index 062f463bc..04e4b30a7 100644
--- a/swarms/tools/developer.py
+++ b/swarms/tools/developer.py
@@ -306,7 +306,7 @@ def execute(self) -> str:
     @staticmethod
     def from_str(command: str) -> "WriteCommand":
         filepath = command.split(WriteCommand.separator)[0]
-        return WriteCommand(filepath, command[len(filepath) + 1:])
+        return WriteCommand(filepath, command[len(filepath) + 1 :])
 
 
 class CodeWriter:
@@ -433,7 +433,7 @@ def execute(self) -> str:
         if self.start == self.end:
             code = code[self.start - 1]
         else:
-            code = "".join(code[self.start - 1: self.end])
+            code = "".join(code[self.start - 1 : self.end])
         return code
 
     @staticmethod
@@ -590,9 +590,9 @@ def execute(self) -> Tuple[int, int]:
         lines[self.start.line] = (
             lines[self.start.line][: self.start.col]
             + self.content
-            + lines[self.end.line][self.end.col:]
+            + lines[self.end.line][self.end.col :]
         )
-        lines = lines[: self.start.line + 1] + lines[self.end.line + 1:]
+        lines = lines[: self.start.line + 1] + lines[self.end.line + 1 :]
 
         after = self.write_lines(lines)
 
diff --git a/swarms/utils/main.py b/swarms/utils/main.py
index 369c59675..6e5907b28 100644
--- a/swarms/utils/main.py
+++ b/swarms/utils/main.py
@@ -365,7 +365,7 @@ def handle(self, url: str) -> str:
         try:
             if url.startswith(os.environ.get("SERVER", "http://localhost:8000")):
                 local_filepath = url[
-                    len(os.environ.get("SERVER", "http://localhost:8000")) + 1:
+                    len(os.environ.get("SERVER", "http://localhost:8000")) + 1 :
                 ]
                 local_filename = Path("file") / local_filepath.split("/")[-1]
                 src = self.path / local_filepath
diff --git a/swarms/workers/worker.py b/swarms/workers/worker.py
index 8d8d28eb1..996b694e7 100644
--- a/swarms/workers/worker.py
+++ b/swarms/workers/worker.py
@@ -1,18 +1,20 @@
+from typing import Dict, List, Optional, Union
+
 import faiss
 from langchain.docstore import InMemoryDocstore
 from langchain.embeddings import OpenAIEmbeddings
 from langchain.tools.human.tool import HumanInputRun
 from langchain.vectorstores import FAISS
 from langchain_experimental.autonomous_agents import AutoGPT
-from typing import Dict, List, Optional, Union
+
 from swarms.agents.message import Message
 from swarms.tools.autogpt import (
     ReadFileTool,
+    WebpageQATool,
     WriteFileTool,
     compile,
-    process_csv,
     load_qa_with_sources_chain,
-    WebpageQATool,
+    process_csv,
 )
 from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator