diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d12300ea76..5c9b107c0d 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.98.0" + ".": "1.99.0" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index e7fb0bdf9b..f86fa668b1 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-721e6ccaa72205ee14c71f8163129920464fb814b95d3df9567a9476bbd9b7fb.yml -openapi_spec_hash: 2115413a21df8b5bf9e4552a74df4312 -config_hash: 9606bb315a193bfd8da0459040143242 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d6a16b25b969c3e5382e7d413de15bf83d5f7534d5c3ecce64d3a7e847418f9e.yml +openapi_spec_hash: 0c0bcf4aee9ca2a948dd14b890dfe728 +config_hash: aeff9289bd7f8c8482e4d738c3c2fde1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 669d5a5792..e7a49bcc9a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 1.99.0 (2025-08-05) + +Full Changelog: [v1.98.0...v1.99.0](https://github.com/openai/openai-python/compare/v1.98.0...v1.99.0) + +### Features + +* **api:** manual updates ([d4aa726](https://github.com/openai/openai-python/commit/d4aa72602bf489ef270154b881b3967d497d4220)) +* **client:** support file upload requests ([0772e6e](https://github.com/openai/openai-python/commit/0772e6ed8310e15539610b003dd73f72f474ec0c)) + + +### Bug Fixes + +* add missing prompt_cache_key & safety_identifier params ([00b49ae](https://github.com/openai/openai-python/commit/00b49ae8d44ea396ac0536fc3ce4658fc669e2f5)) + + +### Chores + +* **internal:** fix ruff target version ([aa6b252](https://github.com/openai/openai-python/commit/aa6b252ae0f25f195dede15755e05dd2f542f42d)) + ## 1.98.0 (2025-07-30) Full Changelog: [v1.97.2...v1.98.0](https://github.com/openai/openai-python/compare/v1.97.2...v1.98.0) diff --git a/api.md b/api.md
index 0280b886d1..657ac0905a 100644 --- a/api.md +++ b/api.md @@ -792,12 +792,12 @@ from openai.types.responses import ( ResponsePrompt, ResponseQueuedEvent, ResponseReasoningItem, - ResponseReasoningSummaryDeltaEvent, - ResponseReasoningSummaryDoneEvent, ResponseReasoningSummaryPartAddedEvent, ResponseReasoningSummaryPartDoneEvent, ResponseReasoningSummaryTextDeltaEvent, ResponseReasoningSummaryTextDoneEvent, + ResponseReasoningTextDeltaEvent, + ResponseReasoningTextDoneEvent, ResponseRefusalDeltaEvent, ResponseRefusalDoneEvent, ResponseStatus, diff --git a/pyproject.toml b/pyproject.toml index 6765611fc2..5e0f1fe3ea 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.98.0" +version = "1.99.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" @@ -177,7 +177,7 @@ reportPrivateUsage = false [tool.ruff] line-length = 120 output-format = "grouped" -target-version = "py37" +target-version = "py38" [tool.ruff.format] docstring-code-format = true diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 3fe669259f..f71e00f51f 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -534,7 +534,10 @@ def _build_request( is_body_allowed = options.method.lower() != "get" if is_body_allowed: - kwargs["json"] = json_data if is_given(json_data) else None + if isinstance(json_data, bytes): + kwargs["content"] = json_data + else: + kwargs["json"] = json_data if is_given(json_data) else None kwargs["files"] = files else: headers.pop("Content-Type", None) diff --git a/src/openai/_files.py b/src/openai/_files.py index 801a0d2928..7b23ca084a 100644 --- a/src/openai/_files.py +++ b/src/openai/_files.py @@ -69,12 +69,12 @@ def _transform_file(file: FileTypes) -> HttpxFileTypes: return file if is_tuple_t(file): - return (file[0], _read_file_content(file[1]), *file[2:]) + return (file[0], read_file_content(file[1]), *file[2:]) raise 
TypeError(f"Expected file types input to be a FileContent type or to be a tuple") -def _read_file_content(file: FileContent) -> HttpxFileContent: +def read_file_content(file: FileContent) -> HttpxFileContent: if isinstance(file, os.PathLike): return pathlib.Path(file).read_bytes() return file @@ -111,12 +111,12 @@ async def _async_transform_file(file: FileTypes) -> HttpxFileTypes: return file if is_tuple_t(file): - return (file[0], await _async_read_file_content(file[1]), *file[2:]) + return (file[0], await async_read_file_content(file[1]), *file[2:]) raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple") -async def _async_read_file_content(file: FileContent) -> HttpxFileContent: +async def async_read_file_content(file: FileContent) -> HttpxFileContent: if isinstance(file, os.PathLike): return await anyio.Path(file).read_bytes() diff --git a/src/openai/_version.py b/src/openai/_version.py index ca890665bc..a5c9b3df71 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.98.0" # x-release-please-version +__version__ = "1.99.0" # x-release-please-version diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py index c851851418..cd1cb2bd7f 100644 --- a/src/openai/resources/chat/completions/completions.py +++ b/src/openai/resources/chat/completions/completions.py @@ -101,7 +101,9 @@ def parse( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -197,8 +199,10 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma "parallel_tool_calls": parallel_tool_calls, "prediction": prediction, "presence_penalty": presence_penalty, + "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": _type_to_response_format(response_format), + "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, "stop": stop, @@ -1378,7 +1382,9 @@ def stream( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: 
Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -1445,7 +1451,9 @@ def stream( parallel_tool_calls=parallel_tool_calls, prediction=prediction, presence_penalty=presence_penalty, + prompt_cache_key=prompt_cache_key, reasoning_effort=reasoning_effort, + safety_identifier=safety_identifier, seed=seed, service_tier=service_tier, store=store, @@ -1514,7 +1522,9 @@ async def parse( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -1610,8 +1620,10 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma "parallel_tool_calls": parallel_tool_calls, "prediction": prediction, "presence_penalty": presence_penalty, + "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": _type_to_response_format(response_format), + "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, "store": store, @@ -2791,7 +2803,9 @@ def stream( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], 
None] | NotGiven = NOT_GIVEN, @@ -2859,7 +2873,9 @@ def stream( parallel_tool_calls=parallel_tool_calls, prediction=prediction, presence_penalty=presence_penalty, + prompt_cache_key=prompt_cache_key, reasoning_effort=reasoning_effort, + safety_identifier=safety_identifier, seed=seed, service_tier=service_tier, stop=stop, diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index 8de46dbab8..6d2b133110 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -1001,7 +1001,9 @@ def parse( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, @@ -1053,7 +1055,9 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]: "parallel_tool_calls": parallel_tool_calls, "previous_response_id": previous_response_id, "prompt": prompt, + "prompt_cache_key": prompt_cache_key, "reasoning": reasoning, + "safety_identifier": safety_identifier, "service_tier": service_tier, "store": store, "stream": stream, @@ -2316,7 +2320,9 @@ async def parse( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, 
store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, @@ -2368,7 +2374,9 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]: "parallel_tool_calls": parallel_tool_calls, "previous_response_id": previous_response_id, "prompt": prompt, + "prompt_cache_key": prompt_cache_key, "reasoning": reasoning, + "safety_identifier": safety_identifier, "service_tier": service_tier, "store": store, "stream": stream, diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py index b563035e78..2e502ed69f 100644 --- a/src/openai/types/responses/__init__.py +++ b/src/openai/types/responses/__init__.py @@ -94,24 +94,20 @@ from .response_function_tool_call_param import ResponseFunctionToolCallParam as ResponseFunctionToolCallParam from .response_mcp_call_completed_event import ResponseMcpCallCompletedEvent as ResponseMcpCallCompletedEvent from .response_function_web_search_param import ResponseFunctionWebSearchParam as ResponseFunctionWebSearchParam +from .response_reasoning_text_done_event import ResponseReasoningTextDoneEvent as ResponseReasoningTextDoneEvent from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall as ResponseCodeInterpreterToolCall from .response_input_message_content_list import ResponseInputMessageContentList as ResponseInputMessageContentList from .response_mcp_call_in_progress_event import ResponseMcpCallInProgressEvent as ResponseMcpCallInProgressEvent +from .response_reasoning_text_delta_event import ResponseReasoningTextDeltaEvent as ResponseReasoningTextDeltaEvent from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam as ResponseFileSearchToolCallParam from .response_mcp_list_tools_failed_event import ResponseMcpListToolsFailedEvent as ResponseMcpListToolsFailedEvent from 
.response_audio_transcript_delta_event import ( ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, ) -from .response_reasoning_summary_done_event import ( - ResponseReasoningSummaryDoneEvent as ResponseReasoningSummaryDoneEvent, -) from .response_mcp_call_arguments_done_event import ( ResponseMcpCallArgumentsDoneEvent as ResponseMcpCallArgumentsDoneEvent, ) -from .response_reasoning_summary_delta_event import ( - ResponseReasoningSummaryDeltaEvent as ResponseReasoningSummaryDeltaEvent, -) from .response_computer_tool_call_output_item import ( ResponseComputerToolCallOutputItem as ResponseComputerToolCallOutputItem, ) diff --git a/src/openai/types/responses/response_reasoning_item.py b/src/openai/types/responses/response_reasoning_item.py index f5da7802f8..e5cb094e62 100644 --- a/src/openai/types/responses/response_reasoning_item.py +++ b/src/openai/types/responses/response_reasoning_item.py @@ -5,29 +5,38 @@ from ..._models import BaseModel -__all__ = ["ResponseReasoningItem", "Summary"] +__all__ = ["ResponseReasoningItem", "Summary", "Content"] class Summary(BaseModel): text: str - """ - A short summary of the reasoning used by the model when generating the response. - """ + """A summary of the reasoning output from the model so far.""" type: Literal["summary_text"] """The type of the object. Always `summary_text`.""" +class Content(BaseModel): + text: str + """Reasoning text output from the model.""" + + type: Literal["reasoning_text"] + """The type of the object. Always `reasoning_text`.""" + + class ResponseReasoningItem(BaseModel): id: str """The unique identifier of the reasoning content.""" summary: List[Summary] - """Reasoning text contents.""" + """Reasoning summary content.""" type: Literal["reasoning"] """The type of the object. 
Always `reasoning`.""" + content: Optional[List[Content]] = None + """Reasoning text content.""" + encrypted_content: Optional[str] = None """ The encrypted content of the reasoning item - populated when a response is diff --git a/src/openai/types/responses/response_reasoning_item_param.py b/src/openai/types/responses/response_reasoning_item_param.py index 2cfa5312ed..042b6c05db 100644 --- a/src/openai/types/responses/response_reasoning_item_param.py +++ b/src/openai/types/responses/response_reasoning_item_param.py @@ -5,29 +5,38 @@ from typing import Iterable, Optional from typing_extensions import Literal, Required, TypedDict -__all__ = ["ResponseReasoningItemParam", "Summary"] +__all__ = ["ResponseReasoningItemParam", "Summary", "Content"] class Summary(TypedDict, total=False): text: Required[str] - """ - A short summary of the reasoning used by the model when generating the response. - """ + """A summary of the reasoning output from the model so far.""" type: Required[Literal["summary_text"]] """The type of the object. Always `summary_text`.""" +class Content(TypedDict, total=False): + text: Required[str] + """Reasoning text output from the model.""" + + type: Required[Literal["reasoning_text"]] + """The type of the object. Always `reasoning_text`.""" + + class ResponseReasoningItemParam(TypedDict, total=False): id: Required[str] """The unique identifier of the reasoning content.""" summary: Required[Iterable[Summary]] - """Reasoning text contents.""" + """Reasoning summary content.""" type: Required[Literal["reasoning"]] """The type of the object. 
Always `reasoning`.""" + content: Iterable[Content] + """Reasoning text content.""" + encrypted_content: Optional[str] """ The encrypted content of the reasoning item - populated when a response is diff --git a/src/openai/types/responses/response_reasoning_summary_delta_event.py b/src/openai/types/responses/response_reasoning_summary_delta_event.py deleted file mode 100644 index 519a4f24ac..0000000000 --- a/src/openai/types/responses/response_reasoning_summary_delta_event.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["ResponseReasoningSummaryDeltaEvent"] - - -class ResponseReasoningSummaryDeltaEvent(BaseModel): - delta: object - """The partial update to the reasoning summary content.""" - - item_id: str - """ - The unique identifier of the item for which the reasoning summary is being - updated. - """ - - output_index: int - """The index of the output item in the response's output array.""" - - sequence_number: int - """The sequence number of this event.""" - - summary_index: int - """The index of the summary part within the output item.""" - - type: Literal["response.reasoning_summary.delta"] - """The type of the event. Always 'response.reasoning_summary.delta'.""" diff --git a/src/openai/types/responses/response_reasoning_summary_done_event.py b/src/openai/types/responses/response_reasoning_summary_done_event.py deleted file mode 100644 index 98bcf9cb9d..0000000000 --- a/src/openai/types/responses/response_reasoning_summary_done_event.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["ResponseReasoningSummaryDoneEvent"] - - -class ResponseReasoningSummaryDoneEvent(BaseModel): - item_id: str - """The unique identifier of the item for which the reasoning summary is finalized.""" - - output_index: int - """The index of the output item in the response's output array.""" - - sequence_number: int - """The sequence number of this event.""" - - summary_index: int - """The index of the summary part within the output item.""" - - text: str - """The finalized reasoning summary text.""" - - type: Literal["response.reasoning_summary.done"] - """The type of the event. Always 'response.reasoning_summary.done'.""" diff --git a/src/openai/types/responses/response_reasoning_text_delta_event.py b/src/openai/types/responses/response_reasoning_text_delta_event.py new file mode 100644 index 0000000000..e1df893bac --- /dev/null +++ b/src/openai/types/responses/response_reasoning_text_delta_event.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseReasoningTextDeltaEvent"] + + +class ResponseReasoningTextDeltaEvent(BaseModel): + content_index: int + """The index of the reasoning content part this delta is associated with.""" + + delta: str + """The text delta that was added to the reasoning content.""" + + item_id: str + """The ID of the item this reasoning text delta is associated with.""" + + output_index: int + """The index of the output item this reasoning text delta is associated with.""" + + sequence_number: int + """The sequence number of this event.""" + + type: Literal["response.reasoning_text.delta"] + """The type of the event. 
Always `response.reasoning_text.delta`.""" diff --git a/src/openai/types/responses/response_reasoning_text_done_event.py b/src/openai/types/responses/response_reasoning_text_done_event.py new file mode 100644 index 0000000000..d22d984e47 --- /dev/null +++ b/src/openai/types/responses/response_reasoning_text_done_event.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseReasoningTextDoneEvent"] + + +class ResponseReasoningTextDoneEvent(BaseModel): + content_index: int + """The index of the reasoning content part.""" + + item_id: str + """The ID of the item this reasoning text is associated with.""" + + output_index: int + """The index of the output item this reasoning text is associated with.""" + + sequence_number: int + """The sequence number of this event.""" + + text: str + """The full text of the completed reasoning content.""" + + type: Literal["response.reasoning_text.done"] + """The type of the event. 
Always `response.reasoning_text.done`.""" diff --git a/src/openai/types/responses/response_stream_event.py b/src/openai/types/responses/response_stream_event.py index 98e1d6c34d..d62cf8969b 100644 --- a/src/openai/types/responses/response_stream_event.py +++ b/src/openai/types/responses/response_stream_event.py @@ -23,13 +23,13 @@ from .response_output_item_added_event import ResponseOutputItemAddedEvent from .response_content_part_added_event import ResponseContentPartAddedEvent from .response_mcp_call_completed_event import ResponseMcpCallCompletedEvent +from .response_reasoning_text_done_event import ResponseReasoningTextDoneEvent from .response_mcp_call_in_progress_event import ResponseMcpCallInProgressEvent +from .response_reasoning_text_delta_event import ResponseReasoningTextDeltaEvent from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent from .response_mcp_list_tools_failed_event import ResponseMcpListToolsFailedEvent from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent -from .response_reasoning_summary_done_event import ResponseReasoningSummaryDoneEvent from .response_mcp_call_arguments_done_event import ResponseMcpCallArgumentsDoneEvent -from .response_reasoning_summary_delta_event import ResponseReasoningSummaryDeltaEvent from .response_image_gen_call_completed_event import ResponseImageGenCallCompletedEvent from .response_mcp_call_arguments_delta_event import ResponseMcpCallArgumentsDeltaEvent from .response_mcp_list_tools_completed_event import ResponseMcpListToolsCompletedEvent @@ -88,6 +88,8 @@ ResponseReasoningSummaryPartDoneEvent, ResponseReasoningSummaryTextDeltaEvent, ResponseReasoningSummaryTextDoneEvent, + ResponseReasoningTextDeltaEvent, + ResponseReasoningTextDoneEvent, ResponseRefusalDeltaEvent, ResponseRefusalDoneEvent, ResponseTextDeltaEvent, @@ -109,8 +111,6 @@ ResponseMcpListToolsInProgressEvent, ResponseOutputTextAnnotationAddedEvent, ResponseQueuedEvent, - 
ResponseReasoningSummaryDeltaEvent, - ResponseReasoningSummaryDoneEvent, ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/vector_store_search_params.py b/src/openai/types/vector_store_search_params.py index 17573d0f61..973c49ff5a 100644 --- a/src/openai/types/vector_store_search_params.py +++ b/src/openai/types/vector_store_search_params.py @@ -35,6 +35,7 @@ class VectorStoreSearchParams(TypedDict, total=False): class RankingOptions(TypedDict, total=False): - ranker: Literal["auto", "default-2024-11-15"] + ranker: Literal["none", "auto", "default-2024-11-15"] + """Enable re-ranking; set to `none` to disable, which can help reduce latency.""" score_threshold: float diff --git a/tests/api_resources/test_vector_stores.py b/tests/api_resources/test_vector_stores.py index 5af95fec41..dffd2b1d07 100644 --- a/tests/api_resources/test_vector_stores.py +++ b/tests/api_resources/test_vector_stores.py @@ -243,7 +243,7 @@ def test_method_search_with_all_params(self, client: OpenAI) -> None: }, max_num_results=1, ranking_options={ - "ranker": "auto", + "ranker": "none", "score_threshold": 0, }, rewrite_query=True, @@ -511,7 +511,7 @@ async def test_method_search_with_all_params(self, async_client: AsyncOpenAI) -> }, max_num_results=1, ranking_options={ - "ranker": "auto", + "ranker": "none", "score_threshold": 0, }, rewrite_query=True,