Skip to content

Commit

Permalink
Fix/typo (microsoft#1034)
Browse files Browse the repository at this point in the history
* fix: typo

* fix: typo

* fix: typo of function name

* fix: typo of function name of test file

* Update test_token_count.py

---------

Co-authored-by: Eric Zhu <[email protected]>
  • Loading branch information
KazooTTT and ekzhu authored Dec 22, 2023
1 parent a1e60e8 commit a122ffe
Show file tree
Hide file tree
Showing 36 changed files with 70 additions and 70 deletions.
2 changes: 1 addition & 1 deletion autogen/agentchat/assistant_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ def __init__(
**kwargs,
)

# Update the provided desciption if None, and we are using the default system_message,
# Update the provided description if None, and we are using the default system_message,
# then use the default description.
if description is None:
if system_message == self.DEFAULT_SYSTEM_MESSAGE:
Expand Down
6 changes: 3 additions & 3 deletions autogen/agentchat/contrib/compressible_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ def _manage_history_on_token_limit(self, messages, token_used, max_token_allowed
# 1. mode = "TERMINATE", terminate the agent if no token left.
if self.compress_config["mode"] == "TERMINATE":
if max_token_allowed - token_used <= 0:
# Teminate if no token left.
# Terminate if no token left.
print(
colored(
f'Warning: Terminate Agent "{self.name}" due to no token left for oai reply. max token for {model}: {max_token_allowed}, existing token count: {token_used}',
Expand Down Expand Up @@ -320,7 +320,7 @@ def on_oai_token_limit(
cmsg["role"] = "user"
sender._oai_messages[self][i] = cmsg

# sucessfully compressed, return False, None for generate_oai_reply to be called with the updated messages
# successfully compressed, return False, None for generate_oai_reply to be called with the updated messages
return False, None
return final, None

Expand All @@ -332,7 +332,7 @@ def compress_messages(
"""Compress a list of messages into one message.
The first message (the initial prompt) will not be compressed.
The rest of the messages will be compressed into one message, the model is asked to distinuish the role of each message: USER, ASSISTANT, FUNCTION_CALL, FUNCTION_RETURN.
The rest of the messages will be compressed into one message, the model is asked to distinguish the role of each message: USER, ASSISTANT, FUNCTION_CALL, FUNCTION_RETURN.
Check out the compress_sys_msg.
    TODO: model used in compression agent is different from assistant agent: For example, if original model used by is gpt-4; we start compressing at 70% of usage, 70% of 8092 = 5664; and we use gpt 3.5 here max_toke = 4096, it will raise error. choosing model automatically?
Expand Down
4 changes: 2 additions & 2 deletions autogen/agentchat/contrib/gpt_assistant_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,7 @@ def _get_run_response(self, thread, run):
new_messages.append(
{
"role": msg.role,
"content": f"Recieved file id={content.image_file.file_id}",
"content": f"Received file id={content.image_file.file_id}",
}
)
return new_messages
Expand All @@ -219,7 +219,7 @@ def _get_run_response(self, thread, run):
}

logger.info(
"Intermediate executing(%s, Sucess: %s) : %s",
"Intermediate executing(%s, Success: %s) : %s",
tool_response["name"],
is_exec_success,
tool_response["content"],
Expand Down
2 changes: 1 addition & 1 deletion autogen/agentchat/contrib/img_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ def get_image_data(image_file: str, use_b64=True) -> bytes:
return content


def llava_formater(prompt: str, order_image_tokens: bool = False) -> Tuple[str, List[str]]:
def llava_formatter(prompt: str, order_image_tokens: bool = False) -> Tuple[str, List[str]]:
"""
Formats the input prompt by replacing image tags and returns the new prompt along with image locations.
Expand Down
4 changes: 2 additions & 2 deletions autogen/agentchat/contrib/llava_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from regex import R

from autogen.agentchat.agent import Agent
from autogen.agentchat.contrib.img_utils import get_image_data, llava_formater
from autogen.agentchat.contrib.img_utils import get_image_data, llava_formatter
from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent
from autogen.code_utils import content_str

Expand Down Expand Up @@ -162,7 +162,7 @@ def llava_call(prompt: str, llm_config: dict) -> str:
Makes a call to the LLaVA service to generate text based on a given prompt
"""

prompt, images = llava_formater(prompt, order_image_tokens=False)
prompt, images = llava_formatter(prompt, order_image_tokens=False)

for im in images:
if len(im) == 0:
Expand Down
4 changes: 2 additions & 2 deletions autogen/agentchat/conversable_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -676,7 +676,7 @@ def generate_code_execution_reply(
else:
messages_to_scan += 1

# iterate through the last n messages reversly
# iterate through the last n messages reversely
# if code blocks are found, execute the code blocks and return the output
# if no code blocks are found, continue
for i in range(min(len(messages), messages_to_scan)):
Expand Down Expand Up @@ -1292,7 +1292,7 @@ def update_function_signature(self, func_sig: Union[str, Dict], is_remove: None)
Args:
func_sig (str or dict): description/name of the function to update/remove to the model. See: https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions
is_remove: whether removing the funciton from llm_config with name 'func_sig'
is_remove: whether removing the function from llm_config with name 'func_sig'
"""

if not self.llm_config:
Expand Down
6 changes: 3 additions & 3 deletions autogen/oai/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ def _separate_create_config(self, config):
return create_config, extra_kwargs

def _client(self, config, openai_config):
"""Create a client with the given config to overrdie openai_config,
"""Create a client with the given config to override openai_config,
after removing extra kwargs.
"""
openai_config = {**openai_config, **{k: v for k, v in config.items() if k in self.openai_kwargs}}
Expand Down Expand Up @@ -244,7 +244,7 @@ def yes_or_no_filter(context, response):
try:
response.cost
except AttributeError:
# update atrribute if cost is not calculated
# update attribute if cost is not calculated
response.cost = self.cost(response)
cache.set(key, response)
self._update_usage_summary(response, use_cache=True)
Expand Down Expand Up @@ -349,7 +349,7 @@ def _completions_create(self, client, params):
def _update_usage_summary(self, response: ChatCompletion | Completion, use_cache: bool) -> None:
"""Update the usage summary.
Usage is calculated no mattter filter is passed or not.
Usage is calculated no matter filter is passed or not.
"""

def update_usage(usage_summary):
Expand Down
2 changes: 1 addition & 1 deletion autogen/oai/completion.py
Original file line number Diff line number Diff line change
Expand Up @@ -430,7 +430,7 @@ def _eval(cls, config: dict, prune=True, eval_only=False):
if previous_num_completions:
n_tokens_list[i] += n_output_tokens
responses_list[i].extend(responses)
# Assumption 1: assuming requesting n1, n2 responses separatively then combining them
# Assumption 1: assuming requesting n1, n2 responses separately then combining them
# is the same as requesting (n1+n2) responses together
else:
n_tokens_list.append(n_output_tokens)
Expand Down
2 changes: 1 addition & 1 deletion samples/apps/autogen-assistant/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ Now that you have AutoGen Assistant installed and running, you are ready to expl
This demo focuses on the research assistant use case with some generalizations:
- **Skills**: The agent is provided with a list of skills that it can leverage while attempting to address a user's query. Each skill is a python function that may be in any file in a folder made availabe to the agents. We separate the concept of global skills available to all agents `backend/files/global_utlis_dir` and user level skills `backend/files/user/<user_hash>/utils_dir`, relevant in a multi user environment. Agents are aware skills as they are appended to the system message. A list of example skills is available in the `backend/global_utlis_dir` folder. Modify the file or create a new file with a function in the same directory to create new global skills.
- **Skills**: The agent is provided with a list of skills that it can leverage while attempting to address a user's query. Each skill is a python function that may be in any file in a folder made available to the agents. We separate the concept of global skills available to all agents `backend/files/global_utlis_dir` and user level skills `backend/files/user/<user_hash>/utils_dir`, relevant in a multi user environment. Agents are aware skills as they are appended to the system message. A list of example skills is available in the `backend/global_utlis_dir` folder. Modify the file or create a new file with a function in the same directory to create new global skills.

- **Conversation Persistence**: Conversation history is persisted in an sqlite database `database.sqlite`.

Expand Down
2 changes: 1 addition & 1 deletion samples/apps/autogen-assistant/autogenra/utils/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -250,7 +250,7 @@ def get_all_skills(user_skills_path: str, global_skills_path: str, dest_dir: str
}

if dest_dir:
# chcek if dest_dir exists
# check if dest_dir exists
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
# copy all skills to dest_dir
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ const Header = ({ meta, link }: any) => {
return (
<div
key={index + "linkrow"}
className={`text-primary items-center hover:text-accent hovder:bg-secondary px-1 pt-1 block text-sm font-medium `}
className={`text-primary items-center hover:text-accent hover:bg-secondary px-1 pt-1 block text-sm font-medium `}
>
<Link
className="hover:text-accent h-full flex flex-col"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,7 @@ export const ColorTween = (
endColor: string,
percent: number
) => {
// exaple startColor = "#ff0000" endColor = "#0000ff" percent = 0.5
// example startColor = "#ff0000" endColor = "#0000ff" percent = 0.5
const start = {
r: parseInt(startColor.substring(1, 3), 16),
g: parseInt(startColor.substring(3, 5), 16),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ const GalleryView = ({ location }: any) => {
</div>
<div className="text-xs">
{" "}
{item.messages.length} messsage{item.messages.length > 1 && "s"}
{item.messages.length} message{item.messages.length > 1 && "s"}
</div>
<div className="my-2 border-t border-dashed w-full pt-2 inline-flex gap-2 ">
<TagsView tags={item.tags} />{" "}
Expand Down
4 changes: 2 additions & 2 deletions samples/tools/testbed/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ This Testbed sample has been tested in, and is known to work with, Autogen versi

## Setup

Before you begin, you must configure your API keys for use with the Testbed. As with other Autogen applications, the Testbed will look for the OpenAI keys in a file in the current working directy, or environment variable named, OAI_CONFIG_LIST. This can be overrriden using a command-line parameter described later.
Before you begin, you must configure your API keys for use with the Testbed. As with other Autogen applications, the Testbed will look for the OpenAI keys in a file in the current working directory, or environment variable named, OAI_CONFIG_LIST. This can be overridden using a command-line parameter described later.

For some scenarios, additional keys may be required (e.g., keys for the Bing Search API). These can be added to an `ENV` file in the `includes` folder. A sample has been provided in ``includes/ENV.example``. Edit ``includes/ENV`` as needed.

Expand Down Expand Up @@ -56,7 +56,7 @@ options:

## Results

By default, the Testbed stores results in a folder heirarchy with the following template:
By default, the Testbed stores results in a folder hierarchy with the following template:

``./results/[scenario]/[instance_id]/[repetition]``

Expand Down
10 changes: 5 additions & 5 deletions samples/tools/testbed/run_scenarios.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,7 @@ def expand_scenario(scenario_dir, scenario, output_dir):

template = scenario["template"]

# Either key works for finding the substiturions list. "values" may be deprecated in the future
# Either key works for finding the substitutions list. "values" may be deprecated in the future
substitutions = scenario["substitutions"] if "substitutions" in scenario else scenario["values"]

# Older versions are only one-level deep. Convert them,
Expand Down Expand Up @@ -159,7 +159,7 @@ def expand_scenario(scenario_dir, scenario, output_dir):
# If the destination is a directory, use the same filename
shutil.copyfile(src_path, os.path.join(dest_path, os.path.basename(src_path)))
else:
# Otherwuse use the filename provided
# Otherwise use the filename provided
shutil.copyfile(src_path, dest_path)

# Expand templated files
Expand All @@ -184,7 +184,7 @@ def run_scenario_natively(work_dir):
Run a scenario in the native environment.
Args:
work_dir (path): the path to the working directory previously created to house this sceario instance
work_dir (path): the path to the working directory previously created to house this scenario instance
"""

# Get the current working directory
Expand Down Expand Up @@ -253,7 +253,7 @@ def run_scenario_in_docker(work_dir, requirements, timeout=600, docker_image=Non
Run a scenario in a Docker environment.
Args:
work_dir (path): the path to the working directory previously created to house this sceario instance
work_dir (path): the path to the working directory previously created to house this scenario instance
timeout (Optional, int): the number of seconds to allow a Docker container to run before timing out
"""

Expand Down Expand Up @@ -462,7 +462,7 @@ def build_default_docker_image(docker_client, image_tag):
if not os.path.isfile(req_file):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), req_file)

# Warn aboit a common error
# Warn about a common error
env_file = os.path.join(GLOBAL_INCLUDES_DIR, "ENV")
example_file = os.path.join(GLOBAL_INCLUDES_DIR, "ENV.example")
if not os.path.isfile(env_file):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
testbed_utils.init()

work_dir = "coding"
target_folder = "__TARGET_FOLDER__" # path to the arifact folder
target_folder = "__TARGET_FOLDER__" # path to the artifact folder

config_list = config_list_from_json("OAI_CONFIG_LIST", filter_dict={"model": ["__MODEL__"]})

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,5 +28,5 @@
]
},
"name": "AnswerQuestionSmallCsv",
"task": "Read and analyze 'file1.csv', then answer the quetion: How much was spent on utilities in total? Write the answer in file 'output.txt'."
"task": "Read and analyze 'file1.csv', then answer the question: How much was spent on utilities in total? Write the answer in file 'output.txt'."
}
2 changes: 1 addition & 1 deletion samples/tools/testbed/scenarios/MATH/problems_to_json.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def problem_to_json():
with open("problems.jsonl", "w") as f:
for i, problem in enumerate(problems):
# a = {
# 'id': f'problem{i}',
# 'id': problem{i}',
# 'template': 'scenario.py',
# 'substitutions': {
# '__PROMPT__': problem,
Expand Down
2 changes: 1 addition & 1 deletion samples/tools/testbed/utils/collate_autogpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ def collate(results_dir="results"):
Test_M, x_M0, x_M1, ..., X_MN
Where x_ij is the number of AsssitantAgent conversation turns needed to pass all the tests for problem i, in Trial/repetition j. If the agent was not able to pass the tests by the end of the conversation, the value will be -1. If data for the trial is missing, the value will be an empty string "".
Where x_ij is the number of AssistantAgent conversation turns needed to pass all the tests for problem i, in Trial/repetition j. If the agent was not able to pass the tests by the end of the conversation, the value will be -1. If data for the trial is missing, the value will be an empty string "".
""".strip(),
formatter_class=argparse.RawTextHelpFormatter,
)
Expand Down
4 changes: 2 additions & 2 deletions samples/tools/testbed/utils/collate_gaia_csv.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,15 +27,15 @@ def collate(results_dir):
for test_id in os.listdir(results_dir):
test_path = os.path.join(results_dir, test_id)

# Collect the reslts vector
# Collect the results vector
results = [test_id]

instance = 0
instance_dir = os.path.join(test_path, str(instance))
while os.path.isdir(instance_dir):
expected_answer_file = os.path.join(instance_dir, "expected_answer.txt")
if not os.path.isfile(expected_answer_file):
# Expected ansewr is missing
# Expected answer is missing
results.append("")

instance += 1
Expand Down
6 changes: 3 additions & 3 deletions samples/tools/testbed/utils/collate_gaia_jsonl.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,10 +15,10 @@ def normalize_answer(a):

def collate(results_dir, instance=0):
"""
Collate the results of running GAIA. Print the results in the format acceped by the leaderboard.
Collate the results of running GAIA. Print the results in the format accepted by the leaderboard.
Args:
results_dir (path): The folder were results were be saved.
    results_dir (path): The folder where results will be saved.
"""

for test_id in os.listdir(results_dir):
Expand Down Expand Up @@ -61,7 +61,7 @@ def collate(results_dir, instance=0):
description=f"""
{script_name} will collate the results of the GAIA scenarios into the jsonl format that can be submit to the GAIA leaderboard.
NOTE: You will likely need to concatenate resuls for level 1, level 2 and level 3 to form a complete submission.
NOTE: You will likely need to concatenate results for level 1, level 2 and level 3 to form a complete submission.
""".strip(),
formatter_class=argparse.RawTextHelpFormatter,
)
Expand Down
2 changes: 1 addition & 1 deletion samples/tools/testbed/utils/collate_human_eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ def collate(results_dir):
HumanEval_M, x_M0, x_M1, ..., X_MN
Where x_ij is the number of AsssitantAgent conversation turns needed to pass all the tests for problem i, in Trial/repetition j. If the agent was not able to pass the tests by the end of the conversation, the value will be -1. If data for the trial is missing, the value will be an empty string "".
Where x_ij is the number of AssistantAgent conversation turns needed to pass all the tests for problem i, in Trial/repetition j. If the agent was not able to pass the tests by the end of the conversation, the value will be -1. If data for the trial is missing, the value will be an empty string "".
""".strip(),
formatter_class=argparse.RawTextHelpFormatter,
)
Expand Down
4 changes: 2 additions & 2 deletions test/agentchat/contrib/test_compressible_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ def constrain_num_messages(messages):
sys.platform in ["darwin", "win32"] or not OPENAI_INSTALLED,
reason="do not run on MacOS or windows or dependency is not installed",
)
def test_compress_messsage():
def test_compress_message():
assistant = CompressibleAgent(
name="assistant",
llm_config={
Expand Down Expand Up @@ -202,5 +202,5 @@ def test_mode_terminate():
if __name__ == "__main__":
test_mode_compress()
test_mode_customized()
test_compress_messsage()
test_compress_message()
test_mode_terminate()
6 changes: 3 additions & 3 deletions test/agentchat/contrib/test_gpt_assistant.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ def test_gpt_assistant_chat():
},
"required": ["question"],
},
"description": "This is an API endpoint allowing users (analysts) to input question about GitHub in text format to retrieve the realted and structured data.",
"description": "This is an API endpoint allowing users (analysts) to input question about GitHub in text format to retrieve the related and structured data.",
}

name = "For test_gpt_assistant_chat"
Expand Down Expand Up @@ -202,13 +202,13 @@ def test_get_assistant_files():
)

files = assistant.openai_client.beta.assistants.files.list(assistant_id=assistant.assistant_id)
retrived_file_ids = [fild.id for fild in files]
retrieved_file_ids = [fild.id for fild in files]
expected_file_id = file.id

assistant.delete_assistant()
openai_client.files.delete(file.id)

assert expected_file_id in retrived_file_ids
assert expected_file_id in retrieved_file_ids


@pytest.mark.skipif(
Expand Down
Loading

0 comments on commit a122ffe

Please sign in to comment.