Skip to content

Commit

Permalink
added error handling for agent calls, toast hooks for errors
Browse files Browse the repository at this point in the history
  • Loading branch information
jmagoon committed Dec 19, 2024
1 parent 06908de commit a4394ce
Show file tree
Hide file tree
Showing 14 changed files with 807 additions and 1,192 deletions.
193 changes: 140 additions & 53 deletions frontend/agents/gpt-4_python_compute/generate/computations.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,33 +2,21 @@
import re
import sys
import traceback
from openai import OpenAI
import openai

from langchain.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# Define system context
openaiSystemContent = """You are an assistant that generates python code and returns it in a way that must follow the template below.
You absolutely need to give a python code section without abbreviation that follows the template. Do not put code lines at the root, but give only the functions and imports.
By default, when requested to do change or add to the code, modify the latest code section. But when the user ask to do it on another section, do so.
In the template, the function compute contains the code and the function test contains a series of call to compute that runs and prints multiple tests.
Don't insert a __main__ section.
Template:
import ...
def compute(in1, in2, in3,...):
'''A textual description of the compute function.'''
#some code
return {{'out1': out1, 'out2': out2, ...}}
def test():
# Call compute multiple times based on a series of inputs. The outputs are then compare with the expected outputs. Print the results and indicate if the tests passed.
"""
def get_fallback_model(original_model):
    """
    Return the model name to retry with when *original_model* is unavailable.

    Every currently known model maps to 'gpt-3.5-turbo'; unrecognized model
    names also fall back to 'gpt-3.5-turbo'. Extend the mapping below as new
    fallback cases are needed.
    """
    known_fallbacks = {
        'chatgpt-4o-latest': 'gpt-3.5-turbo',  # Primary fallback
        'gpt-4': 'gpt-3.5-turbo',
        'gpt-4-turbo': 'gpt-3.5-turbo',
    }
    if original_model in known_fallbacks:
        return known_fallbacks[original_model]
    # Default fallback for any model not listed above.
    return 'gpt-3.5-turbo'

def extract_python_code(response):
"""
Expand Down Expand Up @@ -58,62 +46,161 @@ def extract_python_code(response):
# Combine and return all processed code blocks
return "\n\n".join(processed_code_blocks)

def make_api_call(client, model_version, messages, original_model=None):
    """
    Call the OpenAI chat-completions endpoint with one-shot model fallback.

    Parameters
    ----------
    client : the OpenAI client instance used to issue the request.
    model_version : name of the model to request.
    messages : list of {"role": ..., "content": ...} chat messages.
    original_model : recursion guard — the originally requested model name
        when this call is already a fallback attempt; None on the first try.

    Returns
    -------
    (response, model_version) : the raw API response and the model name
    that actually served the request.

    Raises
    ------
    openai.NotFoundError : if the fallback model is also unavailable.
    Exception : if the API returns an empty/choice-less response.
    (Any other openai error propagates unchanged to the caller.)
    """
    try:
        response = client.chat.completions.create(
            model=model_version,
            messages=messages,
            temperature=0.7
        )
    except openai.NotFoundError:
        # Model not found: fall back exactly once. On the retry,
        # original_model is set, so a second NotFoundError propagates.
        if original_model is None:
            fallback_model = get_fallback_model(model_version)
            return make_api_call(client, fallback_model, messages,
                                 original_model=model_version)
        raise

    # Guard against a syntactically valid but empty completion.
    # (Checked outside the try so it cannot interact with the fallback path.)
    if not response or not response.choices or len(response.choices) == 0:
        raise Exception("Empty response received from OpenAI API")

    return response, model_version

def compute(user_prompt, model_version, conversation_history, apiKey):
    """
    Ask an OpenAI chat model to generate Python code for *user_prompt*.

    Parameters
    ----------
    user_prompt : str
        The user's natural-language request.
    model_version : str
        Name of the OpenAI model to query; may be swapped for a fallback
        by make_api_call if the model is unavailable.
    conversation_history : list
        Prior exchanges as dicts with 'prompt' and 'response' keys; only
        the most recent entry is forwarded to the model.
    apiKey : str
        OpenAI API key.

    Returns
    -------
    dict
        {'response': <extracted Python code>, 'model': <model that served
        the request>}.

    Raises
    ------
    Exception
        With a human-readable message for each API failure mode, or when
        the reply contains no usable Python code.
    """
    # Initialize the OpenAI client
    client = OpenAI(api_key=apiKey)

    # System message: forces the model to answer with code following the
    # compute()/test() template expected by downstream consumers.
    system_content = """You are an assistant that generates python code and returns it in a way that must follow the template below.
You absolutely need to give a python code section without abbreviation that follows the template. Do not put code lines at the root, but give only the functions and imports.
By default, when requested to do change or add to the code, modify the latest code section. But when the user ask to do it on another section, do so.
In the template, the function compute contains the code and the function test contains a series of call to compute that runs and prints multiple tests.
Don't insert a __main__ section.
Template:
import ...
def compute(in1, in2, in3,...):
    '''A textual description of the compute function.'''
    #some code
    return {'out1': out1, 'out2': out2, ...}
def test():
    # Call compute multiple times based on a series of inputs. The outputs are then compare with the expected outputs. Print the results and indicate if the tests passed.
"""

    # Prepare messages array
    messages = [{"role": "system", "content": system_content}]

    # Add conversation history if it exists (only the last entry, keeping
    # the context window small).
    if conversation_history:
        last_entry = conversation_history[-1]
        messages.append({"role": "user", "content": last_entry['prompt']})
        messages.append({"role": "assistant", "content": last_entry['response']})

    # Add the current user prompt
    messages.append({"role": "user", "content": user_prompt})

    try:
        # Make the API call with fallback handling
        response, used_model = make_api_call(client, model_version, messages)

        # Extract the response content
        response_content = response.choices[0].message.content
    except openai.APIConnectionError as e:
        raise Exception(f"The server could not be reached: {str(e.__cause__)}")
    except openai.RateLimitError:
        raise Exception("Rate limit exceeded. Please try again later.")
    except openai.AuthenticationError:
        raise Exception("Invalid API key provided.")
    except openai.BadRequestError as e:
        raise Exception(f"Bad request: {str(e)}")
    except openai.PermissionDeniedError as e:
        raise Exception(f"Permission denied: {str(e)}")
    except openai.NotFoundError as e:
        raise Exception(f"Resource not found: {str(e)}")
    except openai.UnprocessableEntityError as e:
        raise Exception(f"Unprocessable entity: {str(e)}")
    except openai.InternalServerError:
        raise Exception("OpenAI server error. Please try again later.")
    except Exception as e:
        raise Exception(f"Unexpected error: {str(e)}")

    # Keep only the python code. Validated OUTSIDE the try block so the
    # error is reported as-is instead of being re-wrapped by the generic
    # "Unexpected error" handler above.
    code = extract_python_code(response_content)
    if not code or code.isspace():
        raise Exception("No valid Python code was generated")

    return {'response': code, 'model': used_model}

if __name__ == "__main__":
    try:
        # Read JSON string from stdin
        input_json = sys.stdin.read()

        # Parse the JSON input
        data = json.loads(input_json)

        # Extract the arguments from the parsed JSON
        user_prompt = data['userMessage']
        conversation_history = data.get('conversationHistory', [])
        apiKey = data["apiKey"]

        # Validate API key
        if not apiKey or not isinstance(apiKey, str) or len(apiKey.strip()) == 0:
            raise Exception("Invalid or missing API key")

        # Validate user prompt
        if not user_prompt or not isinstance(user_prompt, str) or len(user_prompt.strip()) == 0:
            raise Exception("Invalid or empty user prompt")

        # Call the compute function and get the result
        result = compute(user_prompt, 'chatgpt-4o-latest', conversation_history, apiKey)

        # Print the result as a JSON string
        print(json.dumps(result))
    except Exception as e:
        # Capture the full stack trace for the error payload
        error_traceback = traceback.format_exc()

        # Map the failure to an HTTP-style status code.
        # NOTE(review): compute() re-wraps OpenAI errors in plain Exception,
        # so the isinstance checks below only fire for OpenAI errors raised
        # outside compute(); kept as a defensive fallback — confirm intent.
        status_code = 500  # Default to internal server error
        if isinstance(e, openai.RateLimitError):
            status_code = 429
        elif isinstance(e, openai.AuthenticationError):
            status_code = 401
        elif isinstance(e, openai.PermissionDeniedError):
            status_code = 403
        elif isinstance(e, openai.NotFoundError):
            status_code = 404
        elif isinstance(e, openai.BadRequestError):
            status_code = 400
        elif isinstance(e, openai.UnprocessableEntityError):
            status_code = 422
        elif "Invalid or missing API key" in str(e):
            status_code = 401
        elif "Invalid or empty user prompt" in str(e):
            status_code = 400

        # Emit exactly ONE structured error object on stdout (the scraped
        # text printed the error twice, which would break JSON consumers).
        error_response = {
            "error": {
                "message": str(e),
                "status_code": status_code,
                "traceback": error_traceback,
                "type": e.__class__.__name__
            }
        }
        print(json.dumps(error_response))
Loading

0 comments on commit a4394ce

Please sign in to comment.