
Commit

clean up
Kye committed Oct 7, 2023
1 parent 6aa7b00 commit 0ab9725
Showing 22 changed files with 78 additions and 68 deletions.
18 changes: 9 additions & 9 deletions playground/agents/meta_prompter.py
@@ -2,23 +2,23 @@
from swarms.agents.meta_prompter import MetaPrompterAgent
from langchain.llms import OpenAI

- #init llm
+ # init llm
llm = OpenAI()

- #init the meta prompter agent that optimized prompts
+ # init the meta prompter agent that optimized prompts
meta_optimizer = MetaPrompterAgent(llm=llm)

- #init the worker agent
+ # init the worker agent
worker = Worker(llm)

- #broad task to complete
+ # broad task to complete
task = "Create a feedforward in pytorch"

- #optimize the prompt
- optimized_prompt = meta_optimizer.run(task)
+ # optimize the prompt
+ optimized_prompt = meta_optimizer.run(task)

- #run the optimized prompt with detailed instructions
+ # run the optimized prompt with detailed instructions
result = worker.run(optimized_prompt)

- #print
- print(result)
+ # print
+ print(result)
2 changes: 1 addition & 1 deletion swarms/agents/conversabe_agent.py
@@ -977,7 +977,7 @@ def execute_code_blocks(self, code_blocks):
)
elif lang in ["python", "Python"]:
if code.startswith("# filename: "):
- filename = code[11: code.find("\n")].strip()
+ filename = code[11 : code.find("\n")].strip()
else:
filename = None
exitcode, logs, image = self.run_code(
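Most of the slice edits in this commit apply the same whitespace rule: when a slice bound is an expression rather than a bare name or literal, PEP 8 (and formatters such as Black) put one space on each side of the colon. A minimal, self-contained sketch of the rule; the sample string and variable names are illustrative, not taken from the repository:

# Slice-spacing rule applied throughout this commit (illustrative example).
code = "# filename: model.py\nprint('hi')"

# Simple bounds: no spaces around the colon.
prefix = code[0:2]

# Expression bounds: one space on each side of the colon,
# matching the "[11 : code.find(...)]" style adopted above.
filename = code[11 : code.find("\n")].strip()

print(prefix, filename)  # prefix == "# ", filename == "model.py"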
2 changes: 1 addition & 1 deletion swarms/agents/message.py
@@ -23,6 +23,6 @@ def __init__(self, sender, content, metadata=None):

def __repr__(self):
"""
- __repr__ means...
+ __repr__ means...
"""
return f"{self.timestamp} - {self.sender}: {self.content}"
2 changes: 1 addition & 1 deletion swarms/agents/models/groundingdino/datasets/transforms.py
@@ -38,7 +38,7 @@ def crop(image, target, region):

if "masks" in target:
# FIXME should we update the area here if there are no boxes?
- target["masks"] = target["masks"][:, i: i + h, j: j + w]
+ target["masks"] = target["masks"][:, i : i + h, j : j + w]
fields.append("masks")

# remove elements for which the boxes or masks that have zero area
@@ -159,7 +159,7 @@ def __init__(
), "Only resnet50 and resnet101 are available."
assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
num_channels_all = [256, 512, 1024, 2048]
- num_channels = num_channels_all[4 - len(return_interm_indices):]
+ num_channels = num_channels_all[4 - len(return_interm_indices) :]
super().__init__(backbone, train_backbone, num_channels, return_interm_indices)


@@ -224,7 +224,7 @@ def build_backbone(args):
use_checkpoint=use_checkpoint,
)

- bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]
+ bb_num_channels = backbone.num_features[4 - len(return_interm_indices) :]
else:
raise NotImplementedError("Unknown backbone {}".format(args.backbone))

@@ -649,7 +649,7 @@ def __init__(
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
- drop_path=dpr[sum(depths[:i_layer]): sum(depths[: i_layer + 1])],
+ drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
norm_layer=norm_layer,
# downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
downsample=downsamplelist[i_layer],
@@ -221,9 +221,9 @@ def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer
position_ids[row, col] = 0
else:
attention_mask[
- row, previous_col + 1: col + 1, previous_col + 1: col + 1
+ row, previous_col + 1 : col + 1, previous_col + 1 : col + 1
] = True
- position_ids[row, previous_col + 1: col + 1] = torch.arange(
+ position_ids[row, previous_col + 1 : col + 1] = torch.arange(
0, col - previous_col, device=input_ids.device
)

@@ -273,13 +273,13 @@ def generate_masks_with_special_tokens_and_transfer_map(
position_ids[row, col] = 0
else:
attention_mask[
- row, previous_col + 1: col + 1, previous_col + 1: col + 1
+ row, previous_col + 1 : col + 1, previous_col + 1 : col + 1
] = True
- position_ids[row, previous_col + 1: col + 1] = torch.arange(
+ position_ids[row, previous_col + 1 : col + 1] = torch.arange(
0, col - previous_col, device=input_ids.device
)
c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()
- c2t_maski[previous_col + 1: col] = True
+ c2t_maski[previous_col + 1 : col] = True
cate_to_token_mask_list[row].append(c2t_maski)
previous_col = col

@@ -76,7 +76,7 @@ def gen_encoder_output_proposals(
proposals = []
_cur = 0
for lvl, (H_, W_) in enumerate(spatial_shapes):
- mask_flatten_ = memory_padding_mask[:, _cur: (_cur + H_ * W_)].view(
+ mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(
N_, H_, W_, 1
)
valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
2 changes: 1 addition & 1 deletion swarms/agents/models/groundingdino/util/utils.py
@@ -619,7 +619,7 @@ def get_phrases_from_posmap(
):
assert isinstance(posmap, torch.Tensor), "posmap must be torch.Tensor"
if posmap.dim() == 1:
- posmap[0: left_idx + 1] = False
+ posmap[0 : left_idx + 1] = False
posmap[right_idx:] = False
non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()
token_ids = [tokenized["input_ids"][i] for i in non_zero_idx]
2 changes: 1 addition & 1 deletion swarms/agents/models/groundingdino/util/vl_utils.py
@@ -41,7 +41,7 @@ def create_positive_map_from_span(tokenized, token_span, max_text_len=256):
positive_map[j, beg_pos] = 1
break
else:
- positive_map[j, beg_pos: end_pos + 1].fill_(1)
+ positive_map[j, beg_pos : end_pos + 1].fill_(1)

return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)

@@ -139,7 +139,7 @@ def predict_masks(
# Run the transformer
hs, src = self.transformer(src, pos_src, tokens)
iou_token_out = hs[:, 0, :]
- mask_tokens_out = hs[:, 1: (1 + self.num_mask_tokens), :]
+ mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]

# Upscale mask embeddings and predict masks using the mask tokens
src = src.transpose(1, 2).view(b, c, h, w)
@@ -101,7 +101,7 @@ def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
), "Batched iteration must have inputs of all the same size."
n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
for b in range(n_batches):
- yield [arg[b * batch_size: (b + 1) * batch_size] for arg in args]
+ yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]


def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
@@ -142,7 +142,7 @@ def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
idx = 0
parity = False
for count in rle["counts"]:
- mask[idx: idx + count] = parity
+ mask[idx : idx + count] = parity
idx += count
parity ^= True
mask = mask.reshape(w, h)
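For context on the rle_to_mask hunk just above: rle["counts"] stores alternating run lengths of background and foreground pixels, and each run is written back with exactly the slice assignment this commit reformats. A tiny worked example with made-up RLE values:

import numpy as np

# Made-up run-length encoding: 2 background pixels, 3 foreground, 1 background.
rle = {"size": [2, 3], "counts": [2, 3, 1]}

h, w = rle["size"]
mask = np.empty(h * w, dtype=bool)
idx = 0
parity = False  # runs alternate, starting with background (False)
for count in rle["counts"]:
    mask[idx : idx + count] = parity  # the slice assignment from the hunk above
    idx += count
    parity ^= True
mask = mask.reshape(w, h)  # same (w, h) reshape as in the visible code
print(mask)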
4 changes: 2 additions & 2 deletions swarms/agents/multi_modal_visual_agent.py
@@ -207,12 +207,12 @@ def blend_gt2pt(old_image, new_image, sigma=0.15, steps=100):
kernel[steps:-steps, :steps] = left
kernel[steps:-steps, -steps:] = right

- pt_gt_img = easy_img[pos_h: pos_h + old_size[1], pos_w: pos_w + old_size[0]]
+ pt_gt_img = easy_img[pos_h : pos_h + old_size[1], pos_w : pos_w + old_size[0]]
gaussian_gt_img = (
kernel * gt_img_array + (1 - kernel) * pt_gt_img
) # gt img with blur img
gaussian_gt_img = gaussian_gt_img.astype(np.int64)
- easy_img[pos_h: pos_h + old_size[1], pos_w: pos_w + old_size[0]] = gaussian_gt_img
+ easy_img[pos_h : pos_h + old_size[1], pos_w : pos_w + old_size[0]] = gaussian_gt_img
gaussian_img = Image.fromarray(easy_img)
return gaussian_img

2 changes: 1 addition & 1 deletion swarms/agents/multi_modal_workers/omni_agent/omni_chat.py
@@ -317,7 +317,7 @@ def find_json(s):
s = s.replace("'", '"')
start = s.find("{")
end = s.rfind("}")
- res = s[start: end + 1]
+ res = s[start : end + 1]
res = res.replace("\n", "")
return res

8 changes: 4 additions & 4 deletions swarms/embeddings/openai.py
@@ -347,7 +347,7 @@ def _get_len_safe_embeddings(
disallowed_special=self.disallowed_special,
)
for j in range(0, len(token), self.embedding_ctx_length):
- tokens.append(token[j: j + self.embedding_ctx_length])
+ tokens.append(token[j : j + self.embedding_ctx_length])
indices.append(i)

batched_embeddings: List[List[float]] = []
@@ -366,7 +366,7 @@ def _get_len_safe_embeddings(
for i in _iter:
response = embed_with_retry(
self,
- input=tokens[i: i + _chunk_size],
+ input=tokens[i : i + _chunk_size],
**self._invocation_params,
)
batched_embeddings.extend(r["embedding"] for r in response["data"])
@@ -428,15 +428,15 @@ async def _aget_len_safe_embeddings(
disallowed_special=self.disallowed_special,
)
for j in range(0, len(token), self.embedding_ctx_length):
- tokens.append(token[j: j + self.embedding_ctx_length])
+ tokens.append(token[j : j + self.embedding_ctx_length])
indices.append(i)

batched_embeddings: List[List[float]] = []
_chunk_size = chunk_size or self.chunk_size
for i in range(0, len(tokens), _chunk_size):
response = await async_embed_with_retry(
self,
- input=tokens[i: i + _chunk_size],
+ input=tokens[i : i + _chunk_size],
**self._invocation_params,
)
batched_embeddings.extend(r["embedding"] for r in response["data"])
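The three openai.py hunks all touch the same two-level chunking idiom in _get_len_safe_embeddings: long token lists are first split into embedding_ctx_length-sized windows, and those windows are then sent to the API _chunk_size at a time. A rough standalone sketch of that slicing with made-up sizes (no API call, just the list handling):

# Made-up sizes; only the slicing idiom mirrors _get_len_safe_embeddings.
embedding_ctx_length = 4
chunk_size = 3

texts_as_tokens = [list(range(10)), list(range(5))]  # pretend-tokenized inputs

tokens, indices = [], []
for i, token in enumerate(texts_as_tokens):
    # Split each long token list into context-length windows.
    for j in range(0, len(token), embedding_ctx_length):
        tokens.append(token[j : j + embedding_ctx_length])
        indices.append(i)  # remember which input each window came from

# Send the windows to the embedding endpoint chunk_size at a time.
for i in range(0, len(tokens), chunk_size):
    batch = tokens[i : i + chunk_size]
    print(f"request with {len(batch)} window(s): {batch}")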
4 changes: 2 additions & 2 deletions swarms/models/chat_openai.py
@@ -458,7 +458,7 @@ def get_sub_prompts(
)
params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
sub_prompts = [
- prompts[i: i + self.batch_size]
+ prompts[i : i + self.batch_size]
for i in range(0, len(prompts), self.batch_size)
]
return sub_prompts
@@ -469,7 +469,7 @@ def create_llm_result(
"""Create the LLMResult from the choices and prompts."""
generations = []
for i, _ in enumerate(prompts):
- sub_choices = choices[i * self.n: (i + 1) * self.n]
+ sub_choices = choices[i * self.n : (i + 1) * self.n]
generations.append(
[
Generation(
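The chat_openai.py hunks are the prompt-side counterpart: get_sub_prompts slices the prompt list into batch_size chunks, and create_llm_result later regroups the flat choices list n completions per prompt. A short illustrative sketch with invented values:

# Invented values; only the slicing mirrors get_sub_prompts / create_llm_result.
prompts = ["p0", "p1", "p2", "p3", "p4"]
batch_size = 2
n = 2  # completions requested per prompt

sub_prompts = [
    prompts[i : i + batch_size]
    for i in range(0, len(prompts), batch_size)
]
print(sub_prompts)  # [['p0', 'p1'], ['p2', 'p3'], ['p4']]

# The API returns len(prompts) * n choices in a flat list ...
choices = [f"choice_{k}" for k in range(len(prompts) * n)]
# ... which are regrouped n at a time, one group per prompt.
per_prompt = [choices[i * n : (i + 1) * n] for i in range(len(prompts))]
print(per_prompt[0])  # ['choice_0', 'choice_1']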
12 changes: 6 additions & 6 deletions swarms/structs/link.py
@@ -50,21 +50,21 @@ def is_lc_serializable(self) -> bool:

prompt: BasePromptTemplate
"""Prompt object to use."""

llm: BaseLanguageModel
"""Language model to call."""

output_key: str = "text" #: :meta private:

output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser)
"""Output parser to use.
Defaults to one that takes the most likely string but does not change it
otherwise."""

return_final_only: bool = True
"""Whether to return only the final parsed result. Defaults to True.
If false, will return a bunch of extra information about the generation."""

llm_kwargs: dict = Field(default_factory=dict)

class Config:
@@ -349,4 +349,4 @@ def _link_type(self) -> str:
def from_string(cls, llm: BaseLanguageModel, template: str) -> Link:
"""Create Link from LLM and template."""
prompt_template = PromptTemplate.from_template(template)
- return cls(llm=llm, prompt=prompt_template)
+ return cls(llm=llm, prompt=prompt_template)
9 changes: 8 additions & 1 deletion swarms/structs/workflow.py
@@ -11,7 +11,7 @@ class Workflow:
They string together multiple tasks of varying types, and can use Short-Term Memory
or pass specific arguments downstream.
Usage
llm = LLM()
workflow = Workflow(llm)
@@ -47,11 +47,13 @@ def execute(self) -> Any:
return response

def __init__(self, agent, parallel: bool = False):
+ """__init__"""
self.agent = agent
self.tasks: List[Workflow.Task] = []
self.parallel = parallel

def add(self, task: str) -> Task:
+ """Add a task"""
task = self.Task(task)

if self.last_task():
@@ -62,12 +64,15 @@ def add(self, task: str) -> Task:
return task

def first_task(self) -> Optional[Task]:
+ """Add first task"""
return self.tasks[0] if self.tasks else None

def last_task(self) -> Optional[Task]:
+ """Last task"""
return self.tasks[-1] if self.tasks else None

def run(self, *args) -> Task:
+ """Run tasks"""
[task.reset() for task in self.tasks]

if self.parallel:
@@ -79,6 +84,7 @@ def run(self, *args) -> Task:
return self.last_task()

def context(self, task: Task) -> Dict[str, Any]:
+ """Context in tasks"""
return {
"parent_output": task.parents[0].output
if task.parents and task.parents[0].output
@@ -88,6 +94,7 @@ def context(self, task: Task) -> Dict[str, Any]:
}

def __run_from_task(self, task: Optional[Task]) -> None:
+ """Run from task"""
if task is None:
return
else:
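The workflow.py hunks only add docstrings, but the visible signatures outline the intended use: Workflow(agent) holds a task list, add() appends a task chained after the last one, and run() resets the tasks, executes them, and returns the last task. A hedged usage sketch based on the Usage note in the class docstring; the import path, the FakeAgent stand-in, and the assumption that the agent exposes a run(prompt) method are illustrative, not confirmed by this diff:

from swarms.structs.workflow import Workflow  # path assumed from the diff's file name

class FakeAgent:
    # Minimal stand-in for the "llm = LLM()" line in the class docstring;
    # the real agent interface is not visible in this commit.
    def run(self, prompt: str) -> str:
        return f"echo: {prompt}"

workflow = Workflow(FakeAgent())       # __init__(self, agent, parallel=False)
workflow.add("Summarize the input")    # add() chains each task after last_task()
workflow.add("Translate the summary")
last = workflow.run()                  # run() resets tasks and returns last_task()
print(last.output if last else None)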
37 changes: 19 additions & 18 deletions swarms/tools/autogpt.py
@@ -1,29 +1,30 @@
import interpreter
from transformers import (
BlipForQuestionAnswering,
BlipProcessor,
)
from PIL import Image
import torch
from swarms.utils.logger import logger
from pydantic import Field
from langchain.tools.file_management.write import WriteFileTool
from langchain.tools.file_management.read import ReadFileTool
from langchain.tools import BaseTool
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.qa_with_sources.loading import BaseCombineDocumentsChain
import asyncio
import os

# Tools
from contextlib import contextmanager
from typing import Optional

import interpreter
import pandas as pd
import torch
from langchain.agents import tool
from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
from langchain.chains.qa_with_sources.loading import (
BaseCombineDocumentsChain,
load_qa_with_sources_chain,
)
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.tools import BaseTool
from langchain.tools.file_management.read import ReadFileTool
from langchain.tools.file_management.write import WriteFileTool
from PIL import Image
from pydantic import Field
from transformers import (
BlipForQuestionAnswering,
BlipProcessor,
)

from swarms.utils.logger import logger

ROOT_DIR = "./data/"

@@ -128,7 +129,7 @@ def _run(self, url: str, question: str) -> str:
results = []
# TODO: Handle this with a MapReduceChain
for i in range(0, len(web_docs), 4):
- input_docs = web_docs[i: i + 4]
+ input_docs = web_docs[i : i + 4]
window_result = self.qa_chain(
{"input_documents": input_docs, "question": question},
return_only_outputs=True,
