diff --git a/agentverse/environments/tasksolving_env/rules/executor/tool_using.py b/agentverse/environments/tasksolving_env/rules/executor/tool_using.py
index 9ae70a78d..e9aa5f856 100644
--- a/agentverse/environments/tasksolving_env/rules/executor/tool_using.py
+++ b/agentverse/environments/tasksolving_env/rules/executor/tool_using.py
@@ -1,6 +1,8 @@
 import json
 import ast
-import openai
+from openai import AsyncOpenAI
+
+aclient = AsyncOpenAI()
 from string import Template
 from colorama import Fore
 from aiohttp import ClientSession
@@ -219,43 +221,41 @@ async def _summarize_webpage(webpage, question):
         )
         for _ in range(3):
             try:
-                response = await openai.ChatCompletion.acreate(
-                    messages=[{"role": "user", "content": summarize_prompt}],
-                    model="gpt-3.5-turbo-16k",
-                    functions=[
-                        {
-                            "name": "parse_web_text",
-                            "description": "Parse the text of the webpage based on tthe question. Extract all related infomation about `Question` from the webpage. ! Don't provide information that is not shown in the webpage! ! Don't provide your own opinion!",
-                            "parameters": {
-                                "type": "object",
-                                "properties": {
-                                    "summary": {
-                                        "type": "string",
-                                        "description": "Summary of the webpage with 50 words. Make sure all important information about `Question` is included. ! Don't provide information that is not shown in the webpage! ! Don't provide your own opinion!",
-                                    },
-                                    "related_details": {
+                response = await aclient.chat.completions.create(messages=[{"role": "user", "content": summarize_prompt}],
+                model="gpt-3.5-turbo-16k",
+                functions=[
+                    {
+                        "name": "parse_web_text",
+                        "description": "Parse the text of the webpage based on tthe question. Extract all related infomation about `Question` from the webpage. ! Don't provide information that is not shown in the webpage! ! Don't provide your own opinion!",
+                        "parameters": {
+                            "type": "object",
+                            "properties": {
+                                "summary": {
+                                    "type": "string",
+                                    "description": "Summary of the webpage with 50 words. Make sure all important information about `Question` is included. ! Don't provide information that is not shown in the webpage! ! Don't provide your own opinion!",
+                                },
+                                "related_details": {
+                                    "type": "string",
+                                    "description": "List all webpage details related to the question. Maximum 400 words. ! Don't provide information that is not shown in the webpage! ! Don't provide your own opinion!",
+                                },
+                                "useful_hyperlinks": {
+                                    "type": "array",
+                                    "description": "Maximum 3 items. Select useful hyperlinks in the webpage that related to the question. Make sure the url is useful for further browse. Don't provide repeated hyperlinks.",
+                                    "items": {
                                         "type": "string",
-                                        "description": "List all webpage details related to the question. Maximum 400 words. ! Don't provide information that is not shown in the webpage! ! Don't provide your own opinion!",
-                                    },
-                                    "useful_hyperlinks": {
-                                        "type": "array",
-                                        "description": "Maximum 3 items. Select useful hyperlinks in the webpage that related to the question. Make sure the url is useful for further browse. Don't provide repeated hyperlinks.",
-                                        "items": {
-                                            "type": "string",
-                                            "description": "! Don't provide hyperlinks that is not shown in the webpage! ! Don't provide your own opinion!",
-                                        },
+                                        "description": "! Don't provide hyperlinks that is not shown in the webpage! ! Don't provide your own opinion!",
                                     },
                                 },
-                                "required": [
-                                    "summary",
-                                    "related_details",
-                                    "useful_hyperlinks",
-                                ],
                             },
-                        }
-                    ],
-                    function_call={"name": "parse_web_text"},
-                )
+                            "required": [
+                                "summary",
+                                "related_details",
+                                "useful_hyperlinks",
+                            ],
+                        },
+                    }
+                ],
+                function_call={"name": "parse_web_text"})
             except Exception as e:
                 logger.error("Failed to call the tool. Exception: " + str(e))
                 continue
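Note on the hunk above: with `openai>=1.0` the module-level `openai.ChatCompletion.acreate` call becomes a method on an `AsyncOpenAI` client, and the returned `ChatCompletion` is a typed object rather than a dict, so any code further down this file that still indexes `response["choices"][0]["message"]` will need attribute access. A minimal sketch of the migrated call, using an abbreviated stand-in for the full `parse_web_text` schema shown in the diff:

```python
import json

from openai import AsyncOpenAI

aclient = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment

# Abbreviated stand-in for the full `parse_web_text` schema in the diff above.
PARSE_WEB_TEXT = {
    "name": "parse_web_text",
    "description": "Parse the text of the webpage based on the question.",
    "parameters": {
        "type": "object",
        "properties": {"summary": {"type": "string"}},
        "required": ["summary"],
    },
}


async def summarize_webpage(summarize_prompt: str) -> dict:
    response = await aclient.chat.completions.create(
        model="gpt-3.5-turbo-16k",
        messages=[{"role": "user", "content": summarize_prompt}],
        functions=[PARSE_WEB_TEXT],
        function_call={"name": "parse_web_text"},
    )
    # 1.x responses are pydantic models: use attribute access, not dict indexing.
    arguments = response.choices[0].message.function_call.arguments
    return json.loads(arguments)
```

Note also that `AsyncOpenAI()` is constructed at import time in this hunk; the 1.x constructor raises when no API key is configured, so modules that should import cleanly without a key may want to build the client lazily instead.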
diff --git a/agentverse/llms/openai.py b/agentverse/llms/openai.py
index c119dcd80..d5e59ee78 100644
--- a/agentverse/llms/openai.py
+++ b/agentverse/llms/openai.py
@@ -17,31 +17,25 @@
 from .base import BaseChatModel, BaseCompletionModel, BaseModelArgs
 from .utils.jsonrepair import JsonRepair
 
-try:
-    import openai
-    from openai.error import OpenAIError
-except ImportError:
-    is_openai_available = False
-    logger.warn("openai package is not installed")
+import openai
+
+# openai.proxy = os.environ.get("http_proxy")
+# if openai.proxy is None:
+#     openai.proxy = os.environ.get("HTTP_PROXY")
+if os.environ.get("OPENAI_API_KEY") != None:
+    openai.api_key = os.environ.get("OPENAI_API_KEY")
+    is_openai_available = True
+elif os.environ.get("AZURE_OPENAI_API_KEY") != None:
+    openai.api_type = "azure"
+    openai.api_key = os.environ.get("AZURE_OPENAI_API_KEY")
+    openai.api_base = os.environ.get("AZURE_OPENAI_API_BASE")
+    openai.api_version = "2023-05-15"
+    is_openai_available = True
 else:
-    # openai.proxy = os.environ.get("http_proxy")
-    # if openai.proxy is None:
-    #     openai.proxy = os.environ.get("HTTP_PROXY")
-    if os.environ.get("OPENAI_API_KEY") != None:
-        openai.api_key = os.environ.get("OPENAI_API_KEY")
-        is_openai_available = True
-    elif os.environ.get("AZURE_OPENAI_API_KEY") != None:
-        openai.api_type = "azure"
-        openai.api_key = os.environ.get("AZURE_OPENAI_API_KEY")
-        openai.api_base = os.environ.get("AZURE_OPENAI_API_BASE")
-        openai.api_version = "2023-05-15"
-        is_openai_available = True
-    else:
-        logger.warn(
-            "OpenAI API key is not set. Please set the environment variable OPENAI_API_KEY"
-        )
-        is_openai_available = False
-
+    logger.warn(
+        "OpenAI API key is not set. Please set the environment variable OPENAI_API_KEY"
+    )
+    is_openai_available = False
 
 class OpenAIChatArgs(BaseModelArgs):
     model: str = Field(default="gpt-3.5-turbo")
@@ -148,11 +142,9 @@ def generate_response(
         try:
             # Execute function call
            if functions != []:
-                response = openai.ChatCompletion.create(
-                    messages=messages,
-                    functions=functions,
-                    **self.args.dict(),
-                )
+                response = client.chat.completions.create(messages=messages,
+                functions=functions,
+                **self.args.dict())
                 if response["choices"][0]["message"].get("function_call") is not None:
                     self.collect_metrics(response)
                     return LLMResult(
@@ -179,10 +171,8 @@
                 )
             else:
-                response = openai.ChatCompletion.create(
-                    messages=messages,
-                    **self.args.dict(),
-                )
+                response = client.chat.completions.create(messages=messages,
+                **self.args.dict())
                 self.collect_metrics(response)
                 return LLMResult(
                     content=response["choices"][0]["message"]["content"],
@@ -212,11 +202,9 @@ async def agenerate_response(
         if functions != []:
             async with ClientSession(trust_env=True) as session:
                 openai.aiosession.set(session)
-                response = await openai.ChatCompletion.acreate(
-                    messages=messages,
-                    functions=functions,
-                    **self.args.dict(),
-                )
+                response = await aclient.chat.completions.create(messages=messages,
+                functions=functions,
+                **self.args.dict())
                 if response["choices"][0]["message"].get("function_call") is not None:
                     function_name = response["choices"][0]["message"]["function_call"][
                         "name"
                     ]
@@ -280,10 +268,8 @@
         else:
             async with ClientSession(trust_env=True) as session:
                 openai.aiosession.set(session)
-                response = await openai.ChatCompletion.acreate(
-                    messages=messages,
-                    **self.args.dict(),
-                )
+                response = await aclient.chat.completions.create(messages=messages,
+                **self.args.dict())
                 self.collect_metrics(response)
                 return LLMResult(
                     content=response["choices"][0]["message"]["content"],
@@ -352,13 +338,9 @@ def get_embedding(text: str, attempts=3) -> np.array:
     try:
         text = text.replace("\n", " ")
         if openai.api_type == "azure":
-            embedding = openai.Embedding.create(
-                input=[text], deployment_id="text-embedding-ada-002"
-            )["data"][0]["embedding"]
+            embedding = client.embeddings.create(input=[text], deployment_id="text-embedding-ada-002")["data"][0]["embedding"]
         else:
-            embedding = openai.Embedding.create(
-                input=[text], model="text-embedding-ada-002"
-            )["data"][0]["embedding"]
+            embedding = client.embeddings.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]
         return tuple(embedding)
     except Exception as e:
         attempt += 1
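Two caveats about these hunks. First, they call `client.chat.completions.create` and `aclient.chat.completions.create`, but the visible changes only set the legacy module-level globals (`openai.api_key`, `openai.api_type`, and so on) and never construct `client` or `aclient`; the unchanged `openai.aiosession.set(session)` context lines also reference an attribute that no longer exists in the 1.x SDK. A minimal sketch, not part of the diff, of how the module could build both clients (the Azure branch via `AzureOpenAI`/`AsyncAzureOpenAI` is an assumption that reuses the existing environment variables):

```python
import os

from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI

is_openai_available = True
if os.environ.get("OPENAI_API_KEY") is not None:
    client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
    aclient = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"])
elif os.environ.get("AZURE_OPENAI_API_KEY") is not None:
    azure_kwargs = dict(
        api_key=os.environ["AZURE_OPENAI_API_KEY"],
        azure_endpoint=os.environ["AZURE_OPENAI_API_BASE"],
        api_version="2023-05-15",
    )
    client = AzureOpenAI(**azure_kwargs)
    aclient = AsyncAzureOpenAI(**azure_kwargs)
else:
    client = aclient = None
    is_openai_available = False
```

Second, the untouched `response["choices"][0]["message"]` lookups and the `deployment_id=` argument to `client.embeddings.create` are 0.x idioms: 1.x responses use attribute access (`response.choices[0].message.content`, `response.data[0].embedding`), and Azure deployments are addressed through `model=` on an `AzureOpenAI` client.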
diff --git a/agentverse/memory/chat_history.py b/agentverse/memory/chat_history.py
index 9d07b18ce..cc976ef20 100644
--- a/agentverse/memory/chat_history.py
+++ b/agentverse/memory/chat_history.py
@@ -1,7 +1,9 @@
 import json
 import logging
 import os
-import openai
+from openai import AsyncOpenAI
+
+aclient = AsyncOpenAI()
 import copy
 
 from typing import List, Optional, Tuple, Dict
@@ -206,12 +208,10 @@ async def _update_summary_with_batch(
             summary=self.summary, new_events=new_events_batch
         )
-        self.summary = await openai.ChatCompletion.acreate(
-            messages=[{"role": "user", "content": prompt}],
-            model=model,
-            max_tokens=max_summary_length,
-            temperature=0.5,
-        )["choices"][0]["message"]["content"]
+        self.summary = await aclient.chat.completions.create(messages=[{"role": "user", "content": prompt}],
+        model=model,
+        max_tokens=max_summary_length,
+        temperature=0.5)["choices"][0]["message"]["content"]
 
     def summary_message(self) -> dict:
         return {
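A caveat with the hunk above: subscription binds more tightly than `await`, so `await aclient.chat.completions.create(...)["choices"][0]["message"]["content"]` tries to index the un-awaited coroutine (the pre-migration `acreate` version had the same shape), and a 1.x `ChatCompletion` is not subscriptable in any case. A sketch of the same update that awaits first and then reads the typed response, written as a standalone coroutine (`update_summary` is a hypothetical stand-in for the body of `_update_summary_with_batch`):

```python
from openai import AsyncOpenAI

aclient = AsyncOpenAI()


async def update_summary(prompt: str, model: str, max_summary_length: int) -> str:
    # Await the request first, then read the typed response via attributes.
    response = await aclient.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model=model,
        max_tokens=max_summary_length,
        temperature=0.5,
    )
    return response.choices[0].message.content
```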
diff --git a/requirements.txt b/requirements.txt
index 0a93efec9..120faa740 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@ fastapi==0.95.1
 uvicorn
 py3langid
 setuptools-scm
-openai==0.27.8
+openai==1.6.1
 opencv-python==4.8.0.76
 gradio
 httpx[socks]==0.25.0
@@ -11,7 +11,7 @@ astunparse
 langchain==0.0.157
 scikit-learn
 pydantic==1.10.7
-typing-extensions==4.5.0
+typing-extensions==4.9.0
 typing-inspect==0.8.0
 colorlog
 rapidfuzz
diff --git a/scripts/evaluate_responsegen.py b/scripts/evaluate_responsegen.py
index 07b497ae3..03dcca0a7 100644
--- a/scripts/evaluate_responsegen.py
+++ b/scripts/evaluate_responsegen.py
@@ -2,7 +2,9 @@
 import json
 from string import Template
 import time
-import openai
+from openai import OpenAI
+
+client = OpenAI()
 from tqdm import tqdm
 
 with open("./results.jsonl", "r") as f:
@@ -50,11 +52,9 @@ def write_eval_to_file(file, skip=0):
     )
     for i in range(100):
         try:
-            eval_response = openai.ChatCompletion.create(
-                model="gpt-4",
-                messages=[{"role": "user", "content": prompt}],
-                temperature=0.0,
-            )
+            eval_response = client.chat.completions.create(model="gpt-4",
+            messages=[{"role": "user", "content": prompt}],
+            temperature=0.0)
         except:
             time.sleep(min(i**2, 60))
             continue
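The evaluation script now builds a synchronous `OpenAI()` client at import time; the hunk only rewrites the request, so any code below it that still reads `eval_response["choices"]...` would need the 1.x attribute style as well. A sketch of the retry loop with that access pattern, where `prompt` is assumed from the surrounding script and catching `OpenAIError` instead of a bare `except` is a suggested change, not part of the diff:

```python
import time

from openai import OpenAI, OpenAIError

client = OpenAI()


def evaluate(prompt: str, retries: int = 100) -> str:
    for i in range(retries):
        try:
            eval_response = client.chat.completions.create(
                model="gpt-4",
                messages=[{"role": "user", "content": prompt}],
                temperature=0.0,
            )
            # 1.x responses are objects, not dicts.
            return eval_response.choices[0].message.content
        except OpenAIError:
            time.sleep(min(i**2, 60))
    raise RuntimeError("evaluation request did not succeed")
```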