What happened?
When using a Pydantic class as a `response_format` with fireworks.ai, I'm getting an error:
```python
import logging

import litellm
from pydantic import BaseModel

logger = logging.getLogger(__name__)

class BooleanResponse(BaseModel):
    result: bool

def test_fireworks_structured_output():
    resp = litellm.completion(
        model="fireworks_ai/accounts/fireworks/models/llama-v3p2-90b-vision-instruct",
        messages=[{"role": "user", "content": "Is 2+2=4?"}],
        response_format=BooleanResponse,
    )
    logger.info(f"\nResponse: {resp}")
```
Per the Fireworks docs (https://docs.fireworks.ai/structured-responses/structured-response-formatting), I can see litellm is passing this as the `response_format`:
```python
{
    'type': 'json_schema',
    'json_schema': {
        'schema': {
            'properties': {
                'result': {
                    'title': 'Result',
                    'type': 'boolean'
                }
            },
            'required': [
                'result'
            ],
            'title': 'BooleanResponse',
            'type': 'object',
            'additionalProperties': False
        },
        'name': 'BooleanResponse',
        'strict': True
    }
}
```
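For context, the inner `schema` is just the Pydantic JSON schema for the model, plus the `additionalProperties: False` that OpenAI-style strict mode requires; a minimal sketch to reproduce it, assuming Pydantic v2:

```python
from pydantic import BaseModel

class BooleanResponse(BaseModel):
    result: bool

# Pydantic v2 emits the properties/required/title/type keys seen above;
# litellm appears to add 'additionalProperties': False for strict mode.
print(BooleanResponse.model_json_schema())
# {'properties': {'result': {'title': 'Result', 'type': 'boolean'}},
#  'required': ['result'], 'title': 'BooleanResponse', 'type': 'object'}
```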
Based on the error, `response_format.type` must be `'text'`, `'json_object'`, or `'grammar'`, and a top-level `json_schema` field is not permitted. Combined with the Fireworks docs above, which nest the JSON schema directly under a `schema` key (with no `name`/`strict` wrapper), the payload needs to become:

```python
{
    'type': 'json_object',
    'schema': {
        'properties': {
            'result': {
                'title': 'Result',
                'type': 'boolean'
            }
        },
        'required': [
            'result'
        ],
        'title': 'BooleanResponse',
        'type': 'object',
        'additionalProperties': False
    }
}
```
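As a workaround until this is handled inside litellm, passing the Fireworks-documented dict directly as `response_format` seems to sidestep the Pydantic-to-`json_schema` conversion; a sketch, assuming litellm forwards dict response formats unchanged:

```python
import litellm
from pydantic import BaseModel

class BooleanResponse(BaseModel):
    result: bool

# Send the Fireworks-documented json_object format directly instead of
# letting litellm convert the Pydantic class to OpenAI's json_schema format.
resp = litellm.completion(
    model="fireworks_ai/accounts/fireworks/models/llama-v3p2-90b-vision-instruct",
    messages=[{"role": "user", "content": "Is 2+2=4?"}],
    response_format={
        "type": "json_object",
        "schema": BooleanResponse.model_json_schema(),
    },
)
print(resp.choices[0].message.content)
```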
Relevant log output
```
LiteLLM completion() model= accounts/fireworks/models/llama-v3p2-90b-vision-instruct; provider = fireworks_ai
2024-11-18 14:09:34,986 LiteLLM INFO -
LiteLLM completion() model= accounts/fireworks/models/llama-v3p2-90b-vision-instruct; provider = fireworks_ai
2024-11-18 14:09:35,298 httpx INFO - HTTP Request: POST https://api.fireworks.ai/inference/v1/chat/completions "HTTP/1.1 400 Bad Request"
mbue/shared/llm/llm_client_test.py:50 (test_structured_output[fireworks_ai/accounts/fireworks/models/llama-v3p2-90b-vision-instruct])
/home/daved/.cache/pypoetry/virtualenvs/mbue-8lo_cb6f-py3.11/lib/python3.11/site-packages/litellm/llms/OpenAI/openai.py:911: in acompletion
headers, response = await self.make_openai_chat_completion_request(
/home/daved/.cache/pypoetry/virtualenvs/mbue-8lo_cb6f-py3.11/lib/python3.11/site-packages/litellm/llms/OpenAI/openai.py:618: in make_openai_chat_completion_request
raise e
/home/daved/.cache/pypoetry/virtualenvs/mbue-8lo_cb6f-py3.11/lib/python3.11/site-packages/litellm/llms/OpenAI/openai.py:606: in make_openai_chat_completion_request
await openai_aclient.chat.completions.with_raw_response.create(
/home/daved/.cache/pypoetry/virtualenvs/mbue-8lo_cb6f-py3.11/lib/python3.11/site-packages/openai/_legacy_response.py:373: in wrapped
return cast(LegacyAPIResponse[R], await func(*args, **kwargs))
/home/daved/.cache/pypoetry/virtualenvs/mbue-8lo_cb6f-py3.11/lib/python3.11/site-packages/openai/resources/chat/completions.py:1661: in create
return await self._post(
/home/daved/.cache/pypoetry/virtualenvs/mbue-8lo_cb6f-py3.11/lib/python3.11/site-packages/openai/_base_client.py:1839: in post
return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
/home/daved/.cache/pypoetry/virtualenvs/mbue-8lo_cb6f-py3.11/lib/python3.11/site-packages/openai/_base_client.py:1533: in request
return await self._request(
/home/daved/.cache/pypoetry/virtualenvs/mbue-8lo_cb6f-py3.11/lib/python3.11/site-packages/openai/_base_client.py:1634: in _request
raise self._make_status_error_from_response(err.response) from None
E openai.BadRequestError: Error code: 400 - {'error': {'object': 'error', 'type': 'invalid_request_error', 'message': "2 request validation errors: Input should be 'text', 'json_object' or 'grammar', field: 'response_format.type', value: 'json_schema'; Extra inputs are not permitted, field: 'response_format.json_schema'"}}
During handling of the above exception, another exception occurred:
/home/daved/.cache/pypoetry/virtualenvs/mbue-8lo_cb6f-py3.11/lib/python3.11/site-packages/litellm/main.py:477: in acompletion
response = await init_response
/home/daved/.cache/pypoetry/virtualenvs/mbue-8lo_cb6f-py3.11/lib/python3.11/site-packages/litellm/llms/OpenAI/openai.py:942: in acompletion
raise OpenAIError(
E litellm.llms.OpenAI.openai.OpenAIError: Error code: 400 - {'error': {'object': 'error', 'type': 'invalid_request_error', 'message': "2 request validation errors: Input should be 'text', 'json_object' or 'grammar', field: 'response_format.type', value: 'json_schema'; Extra inputs are not permitted, field: 'response_format.json_schema'"}}
During handling of the above exception, another exception occurred:
mbue/shared/llm/llm_client_test.py:58: in test_structured_output
resp = await client.chat_response("Is 2+2=4?", response_format=BooleanResponse)
mbue/shared/llm/llm_client.py:79: in chat_response
chat_completion = await self._cached_chat_completion(
mbue/shared/llm/llm_client.py:236: in _cached_chat_completion
result = await self._create_chat_completion_with_retry(base_url, **kwargs)
mbue/shared/llm/llm_client.py:276: in _create_chat_completion_with_retry
return await LLMClient._call_vendor_api_client(
mbue/shared/llm/llm_client.py:311: in _call_vendor_api_client
return await litellm_chat_completion(base_url=base_url, **kwargs)
mbue/shared/llm/litellm_client.py:18: in litellm_chat_completion
response = await acompletion(**cleaned_args)
/home/daved/.cache/pypoetry/virtualenvs/mbue-8lo_cb6f-py3.11/lib/python3.11/site-packages/litellm/utils.py:1173: in wrapper_async
raise e
/home/daved/.cache/pypoetry/virtualenvs/mbue-8lo_cb6f-py3.11/lib/python3.11/site-packages/litellm/utils.py:1029: in wrapper_async
result = await original_function(*args, **kwargs)
/home/daved/.cache/pypoetry/virtualenvs/mbue-8lo_cb6f-py3.11/lib/python3.11/site-packages/litellm/main.py:499: in acompletion
raise exception_type(
/home/daved/.cache/pypoetry/virtualenvs/mbue-8lo_cb6f-py3.11/lib/python3.11/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py:2122: in exception_type
raise e
/home/daved/.cache/pypoetry/virtualenvs/mbue-8lo_cb6f-py3.11/lib/python3.11/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py:282: in exception_type
raise BadRequestError(
E litellm.exceptions.BadRequestError: litellm.BadRequestError: Fireworks_aiException - Error code: 400 - {'error': {'object': 'error', 'type': 'invalid_request_error', 'message': "2 request validation errors: Input should be 'text', 'json_object' or 'grammar', field: 'response_format.type', value: 'json_schema'; Extra inputs are not permitted, field: 'response_format.json_schema'"}}
```
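One possible shape of a fix on the litellm side is a provider-specific transform that rewrites the OpenAI-style `json_schema` payload into Fireworks' `json_object` format before the request is sent; a minimal sketch (the helper name and hook point are hypothetical, not litellm's actual API):

```python
def fireworks_response_format(response_format: dict) -> dict:
    """Map an OpenAI-style json_schema response_format to Fireworks' json_object.

    Hypothetical helper: per the 400 error above, Fireworks only accepts
    'text', 'json_object', or 'grammar' for response_format.type, and per
    its docs the JSON schema must sit directly under 'schema'.
    """
    if response_format.get("type") == "json_schema":
        return {
            "type": "json_object",
            # Drop the OpenAI-specific 'name'/'strict' wrapper; Fireworks
            # documents only the bare schema under 'schema'.
            "schema": response_format["json_schema"]["schema"],
        }
    return response_format
```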