Add logprobs to ModelSettings #971
base: main
@@ -109,6 +109,20 @@ english_agent = Agent(
)
```

You can also request token log probabilities when using the Responses API by
setting `top_logprobs` in `ModelSettings`.

```python
from agents import Agent, ModelSettings

agent = Agent(
    name="English agent",
    instructions="You only speak English",
    model="gpt-4o",
    model_settings=ModelSettings(top_logprobs=2),
)
```

## Common issues with using other LLM providers

### Tracing client error 401

Review comment on this docs addition: Once this property is available at the top level, it will be listed here, which is linked from this page. So, could you revert this one too?
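For readers of the docs snippet above: the log probabilities come back on the raw Responses output items rather than in the final output text. Below is a rough sketch of reading them back, assuming the OpenAI SDK attaches a `logprobs` list to output-text content items when the include flag is set; the attribute access is an assumption, not something this PR documents.

```python
import asyncio

from agents import Agent, ModelSettings, Runner


async def main() -> None:
    agent = Agent(
        name="English agent",
        instructions="You only speak English",
        model="gpt-4o",
        model_settings=ModelSettings(top_logprobs=2),
    )
    result = await Runner.run(agent, "Say hello")

    # Each raw model response carries the Responses API output items; output-text
    # content items are expected to expose a `logprobs` list when requested.
    for response in result.raw_responses:
        for item in response.output:
            for content in getattr(item, "content", None) or []:
                for logprob in getattr(content, "logprobs", None) or []:
                    print(logprob)


asyncio.run(main())
```

The defensive `getattr` calls are there because the exact output item types vary by SDK version.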
@@ -17,9 +17,9 @@
class _OmitTypeAnnotation:
    @classmethod
    def __get_pydantic_core_schema__(
        cls,
        _source_type: Any,
        _handler: GetCoreSchemaHandler,
        cls,
        _source_type: Any,
        _handler: GetCoreSchemaHandler,
    ) -> core_schema.CoreSchema:
        def validate_from_none(value: None) -> _Omit:
            return _Omit()

@@ -39,13 +39,14 @@ def validate_from_none(value: None) -> _Omit:
                    from_none_schema,
                ]
            ),
            serialization=core_schema.plain_serializer_function_ser_schema(
                lambda instance: None
            ),
            serialization=core_schema.plain_serializer_function_ser_schema(lambda instance: None),
        )


Omit = Annotated[_Omit, _OmitTypeAnnotation]
Headers: TypeAlias = Mapping[str, Union[str, Omit]]


@dataclass
class ModelSettings:
    """Settings to use when calling an LLM.

@@ -107,6 +108,10 @@ class ModelSettings:
    """Additional output data to include in the model response.
    [include parameter](https://platform.openai.com/docs/api-reference/responses/create#responses-create-include)"""

    top_logprobs: int | None = None
    """Number of top tokens to return logprobs for. Setting this will
    automatically include ``"message.output_text.logprobs"`` in the response."""

    extra_query: Query | None = None
    """Additional query fields to provide with the request.
    Defaults to None if not provided."""

Review comment on `top_logprobs`: this would be used not only in Responses, but also in LiteLLM and Chat Completions, so can you update the following files as well?
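For context, the behavior the docstring describes (and which the new test further down asserts) boils down to a small mapping from `ModelSettings` to request kwargs. The following is a minimal sketch only; `apply_top_logprobs` is a hypothetical helper name, not the code this PR adds to `OpenAIResponsesModel`.

```python
from agents import ModelSettings


def apply_top_logprobs(settings: ModelSettings, kwargs: dict) -> dict:
    """Hypothetical helper: fold the logprobs-related fields into Responses kwargs."""
    if settings.top_logprobs is not None:
        kwargs["top_logprobs"] = settings.top_logprobs
        include = set(kwargs.get("include") or [])
        # Requesting top_logprobs only makes sense if the logprobs are included
        # in the response payload, hence the automatic include entry.
        include.add("message.output_text.logprobs")
        kwargs["include"] = sorted(include)
    return kwargs


# Example output: {'top_logprobs': 2, 'include': ['message.output_text.logprobs']}
print(apply_top_logprobs(ModelSettings(top_logprobs=2), {}))
```

The sketch only mirrors the two assertions in the test below; the PR's real code performs the same mapping inside the Responses request-building path.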
@@ -0,0 +1,50 @@
import pytest
from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails

from agents import ModelSettings, ModelTracing, OpenAIResponsesModel


class DummyResponses:
    async def create(self, **kwargs):
        self.kwargs = kwargs

        class DummyResponse:
            id = "dummy"
            output = []
            usage = type(
                "Usage",
                (),
                {
                    "input_tokens": 0,
                    "output_tokens": 0,
                    "total_tokens": 0,
                    "input_tokens_details": InputTokensDetails(cached_tokens=0),
                    "output_tokens_details": OutputTokensDetails(reasoning_tokens=0),
                },
            )()

        return DummyResponse()


class DummyClient:
    def __init__(self):
        self.responses = DummyResponses()


@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_top_logprobs_param_passed():
    client = DummyClient()
    model = OpenAIResponsesModel(model="gpt-4", openai_client=client)  # type: ignore
    await model.get_response(
        system_instructions=None,
        input="hi",
        model_settings=ModelSettings(top_logprobs=2),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        previous_response_id=None,
    )
    assert client.responses.kwargs["top_logprobs"] == 2
    assert "message.output_text.logprobs" in client.responses.kwargs["include"]
Review comment: We generate the translated docs using a script, so please revert this change.