diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 2aca35ae..d04f223f 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.5.0" + ".": "0.5.1" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index 2e733899..fb17fac7 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 109 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d4bcffecf0cdadf746faa6708ed1ec81fac451f9b857deabbab26f0a343b9314.yml -openapi_spec_hash: 7c54a18b4381248bda7cc34c52142615 -config_hash: d23f847b9ebb3f427d0f198035bd3e9f +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-2bcc845d8635bf93ddcf9ee723af4d7928248412a417bee5fc10d863a1e13867.yml +openapi_spec_hash: 865230cb3abeb01bd85de05891af23c4 +config_hash: ed1e6b3c5f93d12b80d31167f55c557c diff --git a/CHANGELOG.md b/CHANGELOG.md index 5f18dfc2..7abc774c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 0.5.1 (2025-06-02) + +Full Changelog: [v0.5.0...v0.5.1](https://github.com/openai/openai-ruby/compare/v0.5.0...v0.5.1) + +### Bug Fixes + +* **api:** Fix evals and code interpreter interfaces ([24a9100](https://github.com/openai/openai-ruby/commit/24a910015e6885fc19a2ad689fe70a148bed5787)) + ## 0.5.0 (2025-05-29) Full Changelog: [v0.4.1...v0.5.0](https://github.com/openai/openai-ruby/compare/v0.4.1...v0.5.0) diff --git a/Gemfile.lock b/Gemfile.lock index 503224bb..42ec5d77 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -11,7 +11,7 @@ GIT PATH remote: . specs: - openai (0.5.0) + openai (0.5.1) connection_pool GEM diff --git a/README.md b/README.md index ebc0d58c..fe66b8d8 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application ```ruby -gem "openai", "~> 0.5.0" +gem "openai", "~> 0.5.1" ``` diff --git a/lib/openai/models/audio/transcription_text_delta_event.rb b/lib/openai/models/audio/transcription_text_delta_event.rb index 0541f312..4c54ea63 100644 --- a/lib/openai/models/audio/transcription_text_delta_event.rb +++ b/lib/openai/models/audio/transcription_text_delta_event.rb @@ -50,8 +50,8 @@ class Logprob < OpenAI::Internal::Type::BaseModel # @!attribute bytes # The bytes that were used to generate the log probability. # - # @return [Array, nil] - optional :bytes, OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown] + # @return [Array, nil] + optional :bytes, OpenAI::Internal::Type::ArrayOf[Integer] # @!attribute logprob # The log probability of the token. @@ -65,7 +65,7 @@ class Logprob < OpenAI::Internal::Type::BaseModel # # @param token [String] The token that was used to generate the log probability. # - # @param bytes [Array] The bytes that were used to generate the log probability. + # @param bytes [Array] The bytes that were used to generate the log probability. # # @param logprob [Float] The log probability of the token. end diff --git a/lib/openai/models/audio/transcription_text_done_event.rb b/lib/openai/models/audio/transcription_text_done_event.rb index 2651d973..eac7a34d 100644 --- a/lib/openai/models/audio/transcription_text_done_event.rb +++ b/lib/openai/models/audio/transcription_text_done_event.rb @@ -51,8 +51,8 @@ class Logprob < OpenAI::Internal::Type::BaseModel # @!attribute bytes # The bytes that were used to generate the log probability. 
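The typed `bytes` change above (Array<Object> to Array<Integer>, where each Integer is a UTF-8 byte value) makes the log-probability payload directly decodable. A minimal sketch, assuming `logprob` is one entry from a transcription delta event's `logprobs`:

# logprob.bytes is now an Array of Integer UTF-8 byte values (and may be nil)
decoded = (logprob.bytes || []).pack("C*").force_encoding(Encoding::UTF_8)
puts "#{logprob.token} -> #{decoded} (logprob #{logprob.logprob})"
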
# - # @return [Array, nil] - optional :bytes, OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown] + # @return [Array, nil] + optional :bytes, OpenAI::Internal::Type::ArrayOf[Integer] # @!attribute logprob # The log probability of the token. @@ -66,7 +66,7 @@ class Logprob < OpenAI::Internal::Type::BaseModel # # @param token [String] The token that was used to generate the log probability. # - # @param bytes [Array] The bytes that were used to generate the log probability. + # @param bytes [Array] The bytes that were used to generate the log probability. # # @param logprob [Float] The log probability of the token. end diff --git a/lib/openai/models/chat/chat_completion.rb b/lib/openai/models/chat/chat_completion.rb index 6f5b922c..b1a17a6f 100644 --- a/lib/openai/models/chat/chat_completion.rb +++ b/lib/openai/models/chat/chat_completion.rb @@ -46,9 +46,9 @@ class ChatCompletion < OpenAI::Internal::Type::BaseModel # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -195,9 +195,9 @@ class Logprobs < OpenAI::Internal::Type::BaseModel # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). diff --git a/lib/openai/models/chat/chat_completion_chunk.rb b/lib/openai/models/chat/chat_completion_chunk.rb index 9dfe771e..63c1109e 100644 --- a/lib/openai/models/chat/chat_completion_chunk.rb +++ b/lib/openai/models/chat/chat_completion_chunk.rb @@ -45,9 +45,9 @@ class ChatCompletionChunk < OpenAI::Internal::Type::BaseModel # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -378,9 +378,9 @@ class Logprobs < OpenAI::Internal::Type::BaseModel # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. 
# - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). diff --git a/lib/openai/models/chat/completion_create_params.rb b/lib/openai/models/chat/completion_create_params.rb index 77791681..97c5a09a 100644 --- a/lib/openai/models/chat/completion_create_params.rb +++ b/lib/openai/models/chat/completion_create_params.rb @@ -226,9 +226,9 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -553,9 +553,9 @@ module ResponseFormat # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). diff --git a/lib/openai/models/fine_tuning/alpha/grader_run_params.rb b/lib/openai/models/fine_tuning/alpha/grader_run_params.rb index 0dfe4ffc..152ac2a1 100644 --- a/lib/openai/models/fine_tuning/alpha/grader_run_params.rb +++ b/lib/openai/models/fine_tuning/alpha/grader_run_params.rb @@ -16,26 +16,32 @@ class GraderRunParams < OpenAI::Internal::Type::BaseModel required :grader, union: -> { OpenAI::FineTuning::Alpha::GraderRunParams::Grader } # @!attribute model_sample - # The model sample to be evaluated. + # The model sample to be evaluated. This value will be used to populate the + # `sample` namespace. See + # [the guide](https://platform.openai.com/docs/guides/graders) for more details. + # The `output_json` variable will be populated if the model sample is a valid JSON + # string. # # @return [String] required :model_sample, String - # @!attribute reference_answer - # The reference answer for the evaluation. + # @!attribute item + # The dataset item provided to the grader. This will be used to populate the + # `item` namespace. See + # [the guide](https://platform.openai.com/docs/guides/graders) for more details. 
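The hunk continues below: `reference_answer` is removed in favor of an optional free-form `item`, which populates the `item` namespace available to grader templates. A hedged sketch of the reworked call; the grader fields and the `{{item.*}}`/`{{sample.*}}` template variables are illustrative, not taken from this diff:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Hypothetical usage of the new signature: the dataset row goes in `item`,
# and grader templates read from the `item` and `sample` namespaces.
result = client.fine_tuning.alpha.graders.run(
  grader: {
    type: :string_check,
    name: "exact_match",
    input: "{{sample.output_text}}",  # the model sample under test
    operation: :eq,
    reference: "{{item.expected}}"    # pulled from the dataset item
  },
  model_sample: "Paris",
  item: {expected: "Paris"}
)
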
# - # @return [String, Object, Array, Float] - required :reference_answer, - union: -> { - OpenAI::FineTuning::Alpha::GraderRunParams::ReferenceAnswer - } + # @return [Object, nil] + optional :item, OpenAI::Internal::Type::Unknown - # @!method initialize(grader:, model_sample:, reference_answer:, request_options: {}) + # @!method initialize(grader:, model_sample:, item: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::Alpha::GraderRunParams} for more details. + # # @param grader [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] The grader used for the fine-tuning job. # - # @param model_sample [String] The model sample to be evaluated. + # @param model_sample [String] The model sample to be evaluated. This value will be used to populate # - # @param reference_answer [String, Object, Array, Float] The reference answer for the evaluation. + # @param item [Object] The dataset item provided to the grader. This will be used to populate # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] @@ -63,25 +69,6 @@ module Grader # @!method self.variants # @return [Array(OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader)] end - - # The reference answer for the evaluation. - module ReferenceAnswer - extend OpenAI::Internal::Type::Union - - variant String - - variant OpenAI::Internal::Type::Unknown - - variant -> { OpenAI::Models::FineTuning::Alpha::GraderRunParams::ReferenceAnswer::UnionMember2Array } - - variant Float - - # @!method self.variants - # @return [Array(String, Object, Array, Float)] - - # @type [OpenAI::Internal::Type::Converter] - UnionMember2Array = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown] - end end end end diff --git a/lib/openai/models/fine_tuning/fine_tuning_job.rb b/lib/openai/models/fine_tuning/fine_tuning_job.rb index 64df4360..dad79035 100644 --- a/lib/openai/models/fine_tuning/fine_tuning_job.rb +++ b/lib/openai/models/fine_tuning/fine_tuning_job.rb @@ -226,7 +226,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # Number of examples in each batch. A larger batch size means that model # parameters are updated less frequently, but with lower variance. # - # @return [Object, Symbol, :auto, Integer, nil] + # @return [Symbol, :auto, Integer, nil] optional :batch_size, union: -> { OpenAI::FineTuning::FineTuningJob::Hyperparameters::BatchSize }, nil?: true @@ -253,7 +253,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel # The hyperparameters used for the fine-tuning job. This value will only be # returned when running `supervised` jobs. # - # @param batch_size [Object, Symbol, :auto, Integer, nil] Number of examples in each batch. A larger batch size means that model parameter + # @param batch_size [Symbol, :auto, Integer, nil] Number of examples in each batch. A larger batch size means that model parameter # # @param learning_rate_multiplier [Symbol, :auto, Float] Scaling factor for the learning rate. 
A smaller learning rate may be useful to a # @@ -266,14 +266,12 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel module BatchSize extend OpenAI::Internal::Type::Union - variant OpenAI::Internal::Type::Unknown - variant const: :auto variant Integer # @!method self.variants - # @return [Array(Object, Symbol, :auto, Integer)] + # @return [Array(Symbol, :auto, Integer)] end # Scaling factor for the learning rate. A smaller learning rate may be useful to diff --git a/lib/openai/models/graders/multi_grader.rb b/lib/openai/models/graders/multi_grader.rb index de0d1240..0f5bd82e 100644 --- a/lib/openai/models/graders/multi_grader.rb +++ b/lib/openai/models/graders/multi_grader.rb @@ -11,9 +11,11 @@ class MultiGrader < OpenAI::Internal::Type::BaseModel required :calculate_output, String # @!attribute graders + # A StringCheckGrader object that performs a string comparison between input and + # reference using a specified operation. # - # @return [Hash{Symbol=>OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader}] - required :graders, -> { OpenAI::Internal::Type::HashOf[union: OpenAI::Graders::MultiGrader::Grader] } + # @return [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader] + required :graders, union: -> { OpenAI::Graders::MultiGrader::Graders } # @!attribute name # The name of the grader. @@ -28,12 +30,15 @@ class MultiGrader < OpenAI::Internal::Type::BaseModel required :type, const: :multi # @!method initialize(calculate_output:, graders:, name:, type: :multi) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Graders::MultiGrader} for more details. + # # A MultiGrader object combines the output of multiple graders to produce a single # score. # # @param calculate_output [String] A formula to calculate the output based on grader results. # - # @param graders [Hash{Symbol=>OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader}] + # @param graders [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader] A StringCheckGrader object that performs a string comparison between input and r # # @param name [String] The name of the grader. # @@ -41,7 +46,9 @@ class MultiGrader < OpenAI::Internal::Type::BaseModel # A StringCheckGrader object that performs a string comparison between input and # reference using a specified operation. - module Grader + # + # @see OpenAI::Models::Graders::MultiGrader#graders + module Graders extend OpenAI::Internal::Type::Union # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation. diff --git a/lib/openai/models/image_edit_params.rb b/lib/openai/models/image_edit_params.rb index ea3225f3..6162af1a 100644 --- a/lib/openai/models/image_edit_params.rb +++ b/lib/openai/models/image_edit_params.rb @@ -11,7 +11,7 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel # The image(s) to edit. 
Must be a supported image file or an array of images. # # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than - # 25MB. You can provide up to 16 images. + # 50MB. You can provide up to 16 images. # # For `dall-e-2`, you can only provide one image, and it should be a square `png` # file less than 4MB. @@ -123,7 +123,7 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel # The image(s) to edit. Must be a supported image file or an array of images. # # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than - # 25MB. You can provide up to 16 images. + # 50MB. You can provide up to 16 images. # # For `dall-e-2`, you can only provide one image, and it should be a square `png` # file less than 4MB. diff --git a/lib/openai/models/responses/response.rb b/lib/openai/models/responses/response.rb index 20471340..38b6465d 100644 --- a/lib/openai/models/responses/response.rb +++ b/lib/openai/models/responses/response.rb @@ -173,9 +173,9 @@ class Response < OpenAI::Internal::Type::BaseModel # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -346,9 +346,9 @@ module ToolChoice # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). diff --git a/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb b/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb index e8413a69..73bc4f43 100644 --- a/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +++ b/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb @@ -23,12 +23,12 @@ class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::Internal::Type::BaseMo required :sequence_number, Integer # @!attribute type - # The type of the event. Always `response.code_interpreter_call.code.delta`. + # The type of the event. Always `response.code_interpreter_call_code.delta`. 
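The rename from `response.code_interpreter_call.code.delta` to `response.code_interpreter_call_code.delta` (and the matching `.done` rename below) is breaking for any consumer matching on the old event symbols. A sketch of handling the renamed events, assuming the gem's raw SSE streaming surface (`stream_raw`) and an illustrative model name:

stream = client.responses.stream_raw(
  model: "gpt-4.1",  # illustrative model name
  input: "Plot the first 10 squares in Python.",
  tools: [{type: :code_interpreter, container: {type: :auto}}]
)

stream.each do |event|
  case event.type
  when :"response.code_interpreter_call_code.delta"  # was response.code_interpreter_call.code.delta
    print event.delta
  when :"response.code_interpreter_call_code.done"   # was response.code_interpreter_call.code.done
    puts "\n-- code finalized --"
  end
end
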
# - # @return [Symbol, :"response.code_interpreter_call.code.delta"] - required :type, const: :"response.code_interpreter_call.code.delta" + # @return [Symbol, :"response.code_interpreter_call_code.delta"] + required :type, const: :"response.code_interpreter_call_code.delta" - # @!method initialize(delta:, output_index:, sequence_number:, type: :"response.code_interpreter_call.code.delta") + # @!method initialize(delta:, output_index:, sequence_number:, type: :"response.code_interpreter_call_code.delta") # Some parameter documentations has been truncated, see # {OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent} for more # details. @@ -41,7 +41,7 @@ class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::Internal::Type::BaseMo # # @param sequence_number [Integer] The sequence number of this event. # - # @param type [Symbol, :"response.code_interpreter_call.code.delta"] The type of the event. Always `response.code_interpreter_call.code.delta`. + # @param type [Symbol, :"response.code_interpreter_call_code.delta"] The type of the event. Always `response.code_interpreter_call_code.delta`. end end end diff --git a/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb b/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb index beaab321..356bcee2 100644 --- a/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +++ b/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb @@ -23,12 +23,12 @@ class ResponseCodeInterpreterCallCodeDoneEvent < OpenAI::Internal::Type::BaseMod required :sequence_number, Integer # @!attribute type - # The type of the event. Always `response.code_interpreter_call.code.done`. + # The type of the event. Always `response.code_interpreter_call_code.done`. # - # @return [Symbol, :"response.code_interpreter_call.code.done"] - required :type, const: :"response.code_interpreter_call.code.done" + # @return [Symbol, :"response.code_interpreter_call_code.done"] + required :type, const: :"response.code_interpreter_call_code.done" - # @!method initialize(code:, output_index:, sequence_number:, type: :"response.code_interpreter_call.code.done") + # @!method initialize(code:, output_index:, sequence_number:, type: :"response.code_interpreter_call_code.done") # Some parameter documentations has been truncated, see # {OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent} for more # details. @@ -41,7 +41,7 @@ class ResponseCodeInterpreterCallCodeDoneEvent < OpenAI::Internal::Type::BaseMod # # @param sequence_number [Integer] The sequence number of this event. # - # @param type [Symbol, :"response.code_interpreter_call.code.done"] The type of the event. Always `response.code_interpreter_call.code.done`. + # @param type [Symbol, :"response.code_interpreter_call_code.done"] The type of the event. Always `response.code_interpreter_call_code.done`. end end end diff --git a/lib/openai/models/responses/response_create_params.rb b/lib/openai/models/responses/response_create_params.rb index 4f70650a..7f26b349 100644 --- a/lib/openai/models/responses/response_create_params.rb +++ b/lib/openai/models/responses/response_create_params.rb @@ -55,6 +55,8 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # multi-turn conversations when using the Responses API statelessly (like when # the `store` parameter is set to `false`, or when an organization is enrolled # in the zero data retention program). 
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. # # @return [Array, nil] optional :include, @@ -122,9 +124,9 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -299,9 +301,9 @@ module Input # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). diff --git a/lib/openai/models/responses/response_includable.rb b/lib/openai/models/responses/response_includable.rb index f56e4278..7300f818 100644 --- a/lib/openai/models/responses/response_includable.rb +++ b/lib/openai/models/responses/response_includable.rb @@ -16,6 +16,8 @@ module Responses # multi-turn conversations when using the Responses API statelessly (like when # the `store` parameter is set to `false`, or when an organization is enrolled # in the zero data retention program). + # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. module ResponseIncludable extend OpenAI::Internal::Type::Enum @@ -23,6 +25,7 @@ module ResponseIncludable MESSAGE_INPUT_IMAGE_IMAGE_URL = :"message.input_image.image_url" COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL = :"computer_call_output.output.image_url" REASONING_ENCRYPTED_CONTENT = :"reasoning.encrypted_content" + CODE_INTERPRETER_CALL_OUTPUTS = :"code_interpreter_call.outputs" # @!method self.values # @return [Array] diff --git a/lib/openai/models/responses/response_output_text.rb b/lib/openai/models/responses/response_output_text.rb index 79440d4d..1d945e56 100644 --- a/lib/openai/models/responses/response_output_text.rb +++ b/lib/openai/models/responses/response_output_text.rb @@ -7,7 +7,7 @@ class ResponseOutputText < OpenAI::Internal::Type::BaseModel # @!attribute annotations # The annotations of the text output. # - # @return [Array] + # @return [Array] required :annotations, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputText::Annotation] @@ -42,7 +42,7 @@ class ResponseOutputText < OpenAI::Internal::Type::BaseModel # @!method initialize(annotations:, text:, logprobs: nil, type: :output_text) # A text output from the model. # - # @param annotations [Array] The annotations of the text output. + # @param annotations [Array] The annotations of the text output. # # @param text [String] The text output from the model. 
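Since `container_file_citation` joins the annotation union (the new variant and class are added just below), code that switches on annotation types needs an extra branch. A hedged sketch, assuming `response` is a completed Responses API result:

response.output.each do |item|
  next unless item.type == :message

  item.content.each do |content|
    next unless content.type == :output_text

    content.annotations.each do |annotation|
      case annotation.type
      when :container_file_citation  # new in this release
        puts "container file #{annotation.file_id} in #{annotation.container_id} " \
             "(chars #{annotation.start_index}..#{annotation.end_index})"
      when :file_citation, :url_citation, :file_path
        # previously existing variants
      end
    end
  end
end
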
# @@ -62,6 +62,10 @@ module Annotation # A citation for a web resource used to generate a model response. variant :url_citation, -> { OpenAI::Responses::ResponseOutputText::Annotation::URLCitation } + # A citation for a container file used to generate a model response. + variant :container_file_citation, + -> { OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation } + # A path to a file. variant :file_path, -> { OpenAI::Responses::ResponseOutputText::Annotation::FilePath } @@ -139,6 +143,51 @@ class URLCitation < OpenAI::Internal::Type::BaseModel # @param type [Symbol, :url_citation] The type of the URL citation. Always `url_citation`. end + class ContainerFileCitation < OpenAI::Internal::Type::BaseModel + # @!attribute container_id + # The ID of the container file. + # + # @return [String] + required :container_id, String + + # @!attribute end_index + # The index of the last character of the container file citation in the message. + # + # @return [Integer] + required :end_index, Integer + + # @!attribute file_id + # The ID of the file. + # + # @return [String] + required :file_id, String + + # @!attribute start_index + # The index of the first character of the container file citation in the message. + # + # @return [Integer] + required :start_index, Integer + + # @!attribute type + # The type of the container file citation. Always `container_file_citation`. + # + # @return [Symbol, :container_file_citation] + required :type, const: :container_file_citation + + # @!method initialize(container_id:, end_index:, file_id:, start_index:, type: :container_file_citation) + # A citation for a container file used to generate a model response. + # + # @param container_id [String] The ID of the container file. + # + # @param end_index [Integer] The index of the last character of the container file citation in the message. + # + # @param file_id [String] The ID of the file. + # + # @param start_index [Integer] The index of the first character of the container file citation in the message. + # + # @param type [Symbol, :container_file_citation] The type of the container file citation. Always `container_file_citation`. + end + class FilePath < OpenAI::Internal::Type::BaseModel # @!attribute file_id # The ID of the file. @@ -173,7 +222,7 @@ class FilePath < OpenAI::Internal::Type::BaseModel end # @!method self.variants - # @return [Array(OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath)] + # @return [Array(OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::ContainerFileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath)] end class Logprob < OpenAI::Internal::Type::BaseModel diff --git a/lib/openai/models/responses/response_stream_event.rb b/lib/openai/models/responses/response_stream_event.rb index eb98e953..b5e90a6d 100644 --- a/lib/openai/models/responses/response_stream_event.rb +++ b/lib/openai/models/responses/response_stream_event.rb @@ -25,11 +25,11 @@ module ResponseStreamEvent variant :"response.audio.transcript.done", -> { OpenAI::Responses::ResponseAudioTranscriptDoneEvent } # Emitted when a partial code snippet is added by the code interpreter. 
- variant :"response.code_interpreter_call.code.delta", + variant :"response.code_interpreter_call_code.delta", -> { OpenAI::Responses::ResponseCodeInterpreterCallCodeDeltaEvent } # Emitted when code snippet output is finalized by the code interpreter. - variant :"response.code_interpreter_call.code.done", + variant :"response.code_interpreter_call_code.done", -> { OpenAI::Responses::ResponseCodeInterpreterCallCodeDoneEvent } # Emitted when the code interpreter call is completed. diff --git a/lib/openai/resources/fine_tuning/alpha/graders.rb b/lib/openai/resources/fine_tuning/alpha/graders.rb index 52c4cc77..ce7775c2 100644 --- a/lib/openai/resources/fine_tuning/alpha/graders.rb +++ b/lib/openai/resources/fine_tuning/alpha/graders.rb @@ -5,15 +5,18 @@ module Resources class FineTuning class Alpha class Graders + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::Alpha::GraderRunParams} for more details. + # # Run a grader. # - # @overload run(grader:, model_sample:, reference_answer:, request_options: {}) + # @overload run(grader:, model_sample:, item: nil, request_options: {}) # # @param grader [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] The grader used for the fine-tuning job. # - # @param model_sample [String] The model sample to be evaluated. + # @param model_sample [String] The model sample to be evaluated. This value will be used to populate # - # @param reference_answer [String, Object, Array, Float] The reference answer for the evaluation. + # @param item [Object] The dataset item provided to the grader. This will be used to populate # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # diff --git a/lib/openai/resources/responses.rb b/lib/openai/resources/responses.rb index 11143033..2a837bab 100644 --- a/lib/openai/resources/responses.rb +++ b/lib/openai/resources/responses.rb @@ -345,14 +345,14 @@ def delete(response_id, params = {}) # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [nil] + # @return [OpenAI::Models::Responses::Response] # # @see OpenAI::Models::Responses::ResponseCancelParams def cancel(response_id, params = {}) @client.request( method: :post, path: ["responses/%1$s/cancel", response_id], - model: NilClass, + model: OpenAI::Responses::Response, options: params[:request_options] ) end diff --git a/lib/openai/version.rb b/lib/openai/version.rb index 56f83a91..bd01e775 100644 --- a/lib/openai/version.rb +++ b/lib/openai/version.rb @@ -1,5 +1,5 @@ # frozen_string_literal: true module OpenAI - VERSION = "0.5.0" + VERSION = "0.5.1" end diff --git a/rbi/openai/models/audio/transcription_text_delta_event.rbi b/rbi/openai/models/audio/transcription_text_delta_event.rbi index a196922d..d8707c86 100644 --- a/rbi/openai/models/audio/transcription_text_delta_event.rbi +++ b/rbi/openai/models/audio/transcription_text_delta_event.rbi @@ -98,10 +98,10 @@ module OpenAI attr_writer :token # The bytes that were used to generate the log probability. - sig { returns(T.nilable(T::Array[T.anything])) } + sig { returns(T.nilable(T::Array[Integer])) } attr_reader :bytes - sig { params(bytes: T::Array[T.anything]).void } + sig { params(bytes: T::Array[Integer]).void } attr_writer :bytes # The log probability of the token. 
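`Responses#cancel` previously returned `nil`; per the change above it now deserializes the full `Response` model. A minimal sketch, where the response ID is a placeholder and `client` is assumed initialized as in the earlier snippets:

cancelled = client.responses.cancel("resp_123")  # "resp_123" is a placeholder ID
puts cancelled.status                            # full Response model, not nil
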
@@ -114,7 +114,7 @@ module OpenAI sig do params( token: String, - bytes: T::Array[T.anything], + bytes: T::Array[Integer], logprob: Float ).returns(T.attached_class) end @@ -130,7 +130,7 @@ module OpenAI sig do override.returns( - { token: String, bytes: T::Array[T.anything], logprob: Float } + { token: String, bytes: T::Array[Integer], logprob: Float } ) end def to_hash diff --git a/rbi/openai/models/audio/transcription_text_done_event.rbi b/rbi/openai/models/audio/transcription_text_done_event.rbi index 80acac59..21788792 100644 --- a/rbi/openai/models/audio/transcription_text_done_event.rbi +++ b/rbi/openai/models/audio/transcription_text_done_event.rbi @@ -100,10 +100,10 @@ module OpenAI attr_writer :token # The bytes that were used to generate the log probability. - sig { returns(T.nilable(T::Array[T.anything])) } + sig { returns(T.nilable(T::Array[Integer])) } attr_reader :bytes - sig { params(bytes: T::Array[T.anything]).void } + sig { params(bytes: T::Array[Integer]).void } attr_writer :bytes # The log probability of the token. @@ -116,7 +116,7 @@ module OpenAI sig do params( token: String, - bytes: T::Array[T.anything], + bytes: T::Array[Integer], logprob: Float ).returns(T.attached_class) end @@ -132,7 +132,7 @@ module OpenAI sig do override.returns( - { token: String, bytes: T::Array[T.anything], logprob: Float } + { token: String, bytes: T::Array[Integer], logprob: Float } ) end def to_hash diff --git a/rbi/openai/models/chat/chat_completion.rbi b/rbi/openai/models/chat/chat_completion.rbi index c2353966..7408d715 100644 --- a/rbi/openai/models/chat/chat_completion.rbi +++ b/rbi/openai/models/chat/chat_completion.rbi @@ -39,9 +39,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -105,9 +105,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -370,9 +370,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. 
# - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). diff --git a/rbi/openai/models/chat/chat_completion_chunk.rbi b/rbi/openai/models/chat/chat_completion_chunk.rbi index 9d06ce7f..b37b09ef 100644 --- a/rbi/openai/models/chat/chat_completion_chunk.rbi +++ b/rbi/openai/models/chat/chat_completion_chunk.rbi @@ -41,9 +41,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -120,9 +120,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -790,9 +790,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). diff --git a/rbi/openai/models/chat/completion_create_params.rbi b/rbi/openai/models/chat/completion_create_params.rbi index 47e3715b..a64aff1b 100644 --- a/rbi/openai/models/chat/completion_create_params.rbi +++ b/rbi/openai/models/chat/completion_create_params.rbi @@ -277,9 +277,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -632,9 +632,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. 
# - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -1015,9 +1015,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). diff --git a/rbi/openai/models/fine_tuning/alpha/grader_run_params.rbi b/rbi/openai/models/fine_tuning/alpha/grader_run_params.rbi index bca68bdb..f9834f5b 100644 --- a/rbi/openai/models/fine_tuning/alpha/grader_run_params.rbi +++ b/rbi/openai/models/fine_tuning/alpha/grader_run_params.rbi @@ -30,17 +30,22 @@ module OpenAI end attr_accessor :grader - # The model sample to be evaluated. + # The model sample to be evaluated. This value will be used to populate the + # `sample` namespace. See + # [the guide](https://platform.openai.com/docs/guides/graders) for more details. + # The `output_json` variable will be populated if the model sample is a valid JSON + # string. sig { returns(String) } attr_accessor :model_sample - # The reference answer for the evaluation. - sig do - returns( - OpenAI::FineTuning::Alpha::GraderRunParams::ReferenceAnswer::Variants - ) - end - attr_accessor :reference_answer + # The dataset item provided to the grader. This will be used to populate the + # `item` namespace. See + # [the guide](https://platform.openai.com/docs/guides/graders) for more details. + sig { returns(T.nilable(T.anything)) } + attr_reader :item + + sig { params(item: T.anything).void } + attr_writer :item sig do params( @@ -53,18 +58,23 @@ module OpenAI OpenAI::Graders::MultiGrader::OrHash ), model_sample: String, - reference_answer: - OpenAI::FineTuning::Alpha::GraderRunParams::ReferenceAnswer::Variants, + item: T.anything, request_options: OpenAI::RequestOptions::OrHash ).returns(T.attached_class) end def self.new( # The grader used for the fine-tuning job. grader:, - # The model sample to be evaluated. + # The model sample to be evaluated. This value will be used to populate the + # `sample` namespace. See + # [the guide](https://platform.openai.com/docs/guides/graders) for more details. + # The `output_json` variable will be populated if the model sample is a valid JSON + # string. model_sample:, - # The reference answer for the evaluation. - reference_answer:, + # The dataset item provided to the grader. This will be used to populate the + # `item` namespace. See + # [the guide](https://platform.openai.com/docs/guides/graders) for more details. 
+ item: nil, request_options: {} ) end @@ -81,8 +91,7 @@ module OpenAI OpenAI::Graders::MultiGrader ), model_sample: String, - reference_answer: - OpenAI::FineTuning::Alpha::GraderRunParams::ReferenceAnswer::Variants, + item: T.anything, request_options: OpenAI::RequestOptions } ) @@ -115,34 +124,6 @@ module OpenAI def self.variants end end - - # The reference answer for the evaluation. - module ReferenceAnswer - extend OpenAI::Internal::Type::Union - - Variants = - T.type_alias do - T.any(String, T.anything, T::Array[T.anything], Float) - end - - sig do - override.returns( - T::Array[ - OpenAI::FineTuning::Alpha::GraderRunParams::ReferenceAnswer::Variants - ] - ) - end - def self.variants - end - - UnionMember2Array = - T.let( - OpenAI::Internal::Type::ArrayOf[ - OpenAI::Internal::Type::Unknown - ], - OpenAI::Internal::Type::Converter - ) - end end end end diff --git a/rbi/openai/models/fine_tuning/fine_tuning_job.rbi b/rbi/openai/models/fine_tuning/fine_tuning_job.rbi index 8e4d7a3a..090fa734 100644 --- a/rbi/openai/models/fine_tuning/fine_tuning_job.rbi +++ b/rbi/openai/models/fine_tuning/fine_tuning_job.rbi @@ -359,7 +359,7 @@ module OpenAI # returned when running `supervised` jobs. sig do params( - batch_size: T.nilable(T.any(T.anything, Symbol, Integer)), + batch_size: T.nilable(T.any(Symbol, Integer)), learning_rate_multiplier: T.any(Symbol, Float), n_epochs: T.any(Symbol, Integer) ).returns(T.attached_class) @@ -399,8 +399,7 @@ module OpenAI module BatchSize extend OpenAI::Internal::Type::Union - Variants = - T.type_alias { T.nilable(T.any(T.anything, Symbol, Integer)) } + Variants = T.type_alias { T.any(Symbol, Integer) } sig do override.returns( diff --git a/rbi/openai/models/graders/multi_grader.rbi b/rbi/openai/models/graders/multi_grader.rbi index bbf5c142..63f598ad 100644 --- a/rbi/openai/models/graders/multi_grader.rbi +++ b/rbi/openai/models/graders/multi_grader.rbi @@ -15,18 +15,17 @@ module OpenAI sig { returns(String) } attr_accessor :calculate_output + # A StringCheckGrader object that performs a string comparison between input and + # reference using a specified operation. sig do returns( - T::Hash[ - Symbol, - T.any( - OpenAI::Graders::StringCheckGrader, - OpenAI::Graders::TextSimilarityGrader, - OpenAI::Graders::PythonGrader, - OpenAI::Graders::ScoreModelGrader, - OpenAI::Graders::LabelModelGrader - ) - ] + T.any( + OpenAI::Graders::StringCheckGrader, + OpenAI::Graders::TextSimilarityGrader, + OpenAI::Graders::PythonGrader, + OpenAI::Graders::ScoreModelGrader, + OpenAI::Graders::LabelModelGrader + ) ) end attr_accessor :graders @@ -45,16 +44,13 @@ module OpenAI params( calculate_output: String, graders: - T::Hash[ - Symbol, - T.any( - OpenAI::Graders::StringCheckGrader::OrHash, - OpenAI::Graders::TextSimilarityGrader::OrHash, - OpenAI::Graders::PythonGrader::OrHash, - OpenAI::Graders::ScoreModelGrader::OrHash, - OpenAI::Graders::LabelModelGrader::OrHash - ) - ], + T.any( + OpenAI::Graders::StringCheckGrader::OrHash, + OpenAI::Graders::TextSimilarityGrader::OrHash, + OpenAI::Graders::PythonGrader::OrHash, + OpenAI::Graders::ScoreModelGrader::OrHash, + OpenAI::Graders::LabelModelGrader::OrHash + ), name: String, type: Symbol ).returns(T.attached_class) @@ -62,6 +58,8 @@ module OpenAI def self.new( # A formula to calculate the output based on grader results. calculate_output:, + # A StringCheckGrader object that performs a string comparison between input and + # reference using a specified operation. graders:, # The name of the grader. 
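With this change, `MultiGrader#graders` accepts a single grader object rather than a Hash keyed by grader name. A hedged construction sketch; the names, templates, and output formula are illustrative:

multi_grader = {
  type: :multi,
  name: "overall",
  graders: {                        # a single grader now, not a Hash of graders
    type: :string_check,
    name: "exact_match",
    input: "{{sample.output_text}}",
    operation: :eq,
    reference: "{{item.expected}}"
  },
  calculate_output: "exact_match"   # formula over grader results
}

A hash like this could then be passed as the `grader:` argument to `graders.run` shown earlier.
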
name:, @@ -75,16 +73,13 @@ module OpenAI { calculate_output: String, graders: - T::Hash[ - Symbol, - T.any( - OpenAI::Graders::StringCheckGrader, - OpenAI::Graders::TextSimilarityGrader, - OpenAI::Graders::PythonGrader, - OpenAI::Graders::ScoreModelGrader, - OpenAI::Graders::LabelModelGrader - ) - ], + T.any( + OpenAI::Graders::StringCheckGrader, + OpenAI::Graders::TextSimilarityGrader, + OpenAI::Graders::PythonGrader, + OpenAI::Graders::ScoreModelGrader, + OpenAI::Graders::LabelModelGrader + ), name: String, type: Symbol } @@ -95,7 +90,7 @@ module OpenAI # A StringCheckGrader object that performs a string comparison between input and # reference using a specified operation. - module Grader + module Graders extend OpenAI::Internal::Type::Union Variants = @@ -111,7 +106,7 @@ module OpenAI sig do override.returns( - T::Array[OpenAI::Graders::MultiGrader::Grader::Variants] + T::Array[OpenAI::Graders::MultiGrader::Graders::Variants] ) end def self.variants diff --git a/rbi/openai/models/image_edit_params.rbi b/rbi/openai/models/image_edit_params.rbi index b03c85dd..f3e7df1e 100644 --- a/rbi/openai/models/image_edit_params.rbi +++ b/rbi/openai/models/image_edit_params.rbi @@ -14,7 +14,7 @@ module OpenAI # The image(s) to edit. Must be a supported image file or an array of images. # # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than - # 25MB. You can provide up to 16 images. + # 50MB. You can provide up to 16 images. # # For `dall-e-2`, you can only provide one image, and it should be a square `png` # file less than 4MB. @@ -106,7 +106,7 @@ module OpenAI # The image(s) to edit. Must be a supported image file or an array of images. # # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than - # 25MB. You can provide up to 16 images. + # 50MB. You can provide up to 16 images. # # For `dall-e-2`, you can only provide one image, and it should be a square `png` # file less than 4MB. @@ -179,7 +179,7 @@ module OpenAI # The image(s) to edit. Must be a supported image file or an array of images. # # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than - # 25MB. You can provide up to 16 images. + # 50MB. You can provide up to 16 images. # # For `dall-e-2`, you can only provide one image, and it should be a square `png` # file less than 4MB. diff --git a/rbi/openai/models/responses/response.rbi b/rbi/openai/models/responses/response.rbi index 97408461..d7a1789e 100644 --- a/rbi/openai/models/responses/response.rbi +++ b/rbi/openai/models/responses/response.rbi @@ -160,9 +160,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -390,9 +390,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. 
# - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -588,9 +588,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). diff --git a/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi b/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi index a26838ef..d0012c45 100644 --- a/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +++ b/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi @@ -24,7 +24,7 @@ module OpenAI sig { returns(Integer) } attr_accessor :sequence_number - # The type of the event. Always `response.code_interpreter_call.code.delta`. + # The type of the event. Always `response.code_interpreter_call_code.delta`. sig { returns(Symbol) } attr_accessor :type @@ -44,8 +44,8 @@ module OpenAI output_index:, # The sequence number of this event. sequence_number:, - # The type of the event. Always `response.code_interpreter_call.code.delta`. - type: :"response.code_interpreter_call.code.delta" + # The type of the event. Always `response.code_interpreter_call_code.delta`. + type: :"response.code_interpreter_call_code.delta" ) end diff --git a/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi b/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi index fdd8c46e..0ab6b04b 100644 --- a/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +++ b/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi @@ -24,7 +24,7 @@ module OpenAI sig { returns(Integer) } attr_accessor :sequence_number - # The type of the event. Always `response.code_interpreter_call.code.done`. + # The type of the event. Always `response.code_interpreter_call_code.done`. sig { returns(Symbol) } attr_accessor :type @@ -44,8 +44,8 @@ module OpenAI output_index:, # The sequence number of this event. sequence_number:, - # The type of the event. Always `response.code_interpreter_call.code.done`. - type: :"response.code_interpreter_call.code.done" + # The type of the event. Always `response.code_interpreter_call_code.done`. 
+ type: :"response.code_interpreter_call_code.done" ) end diff --git a/rbi/openai/models/responses/response_create_params.rbi b/rbi/openai/models/responses/response_create_params.rbi index 671856d6..2f868bca 100644 --- a/rbi/openai/models/responses/response_create_params.rbi +++ b/rbi/openai/models/responses/response_create_params.rbi @@ -63,6 +63,8 @@ module OpenAI # multi-turn conversations when using the Responses API statelessly (like when # the `store` parameter is set to `false`, or when an organization is enrolled # in the zero data retention program). + # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. sig do returns( T.nilable(T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol]) @@ -121,9 +123,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -372,6 +374,8 @@ module OpenAI # multi-turn conversations when using the Responses API statelessly (like when # the `store` parameter is set to `false`, or when an organization is enrolled # in the zero data retention program). + # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. include: nil, # Inserts a system (or developer) message as the first item in the model's # context. @@ -409,9 +413,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -570,9 +574,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). 
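The new `code_interpreter_call.outputs` includable opts the outputs of python code execution into code interpreter tool call items. A sketch of requesting it at create time, assuming the same client as above and an illustrative model and tool configuration:

response = client.responses.create(
  model: "gpt-4.1",  # illustrative
  input: "Compute the 20th Fibonacci number in Python.",
  tools: [{type: :code_interpreter, container: {type: :auto}}],
  include: [:"code_interpreter_call.outputs"]  # new includable in this release
)
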
diff --git a/rbi/openai/models/responses/response_includable.rbi b/rbi/openai/models/responses/response_includable.rbi
index b99bd61a..f5f63506 100644
--- a/rbi/openai/models/responses/response_includable.rbi
+++ b/rbi/openai/models/responses/response_includable.rbi
@@ -16,6 +16,8 @@ module OpenAI
# multi-turn conversations when using the Responses API statelessly (like when
# the `store` parameter is set to `false`, or when an organization is enrolled
# in the zero data retention program).
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ # in code interpreter tool call items.
module ResponseIncludable
extend OpenAI::Internal::Type::Enum
@@ -43,6 +45,11 @@ module OpenAI
:"reasoning.encrypted_content",
OpenAI::Responses::ResponseIncludable::TaggedSymbol
)
+ CODE_INTERPRETER_CALL_OUTPUTS =
+ T.let(
+ :"code_interpreter_call.outputs",
+ OpenAI::Responses::ResponseIncludable::TaggedSymbol
+ )
sig do
override.returns(
diff --git a/rbi/openai/models/responses/response_output_text.rbi b/rbi/openai/models/responses/response_output_text.rbi
index 7c212ca9..0636bda8 100644
--- a/rbi/openai/models/responses/response_output_text.rbi
+++ b/rbi/openai/models/responses/response_output_text.rbi
@@ -19,6 +19,7 @@ module OpenAI
T.any(
OpenAI::Responses::ResponseOutputText::Annotation::FileCitation,
OpenAI::Responses::ResponseOutputText::Annotation::URLCitation,
+ OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation,
OpenAI::Responses::ResponseOutputText::Annotation::FilePath
)
]
@@ -61,6 +62,7 @@ module OpenAI
T.any(
OpenAI::Responses::ResponseOutputText::Annotation::FileCitation::OrHash,
OpenAI::Responses::ResponseOutputText::Annotation::URLCitation::OrHash,
+ OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation::OrHash,
OpenAI::Responses::ResponseOutputText::Annotation::FilePath::OrHash
)
],
@@ -89,6 +91,7 @@ module OpenAI
T.any(
OpenAI::Responses::ResponseOutputText::Annotation::FileCitation,
OpenAI::Responses::ResponseOutputText::Annotation::URLCitation,
+ OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation,
OpenAI::Responses::ResponseOutputText::Annotation::FilePath
)
],
@@ -110,6 +113,7 @@ module OpenAI
T.any(
OpenAI::Responses::ResponseOutputText::Annotation::FileCitation,
OpenAI::Responses::ResponseOutputText::Annotation::URLCitation,
+ OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation,
OpenAI::Responses::ResponseOutputText::Annotation::FilePath
)
end
@@ -228,6 +232,74 @@ module OpenAI
end
end
+ class ContainerFileCitation < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # The ID of the container file.
+ sig { returns(String) }
+ attr_accessor :container_id
+
+ # The index of the last character of the container file citation in the message.
+ sig { returns(Integer) }
+ attr_accessor :end_index
+
+ # The ID of the file.
+ sig { returns(String) }
+ attr_accessor :file_id
+
+ # The index of the first character of the container file citation in the message.
+ sig { returns(Integer) }
+ attr_accessor :start_index
+
+ # The type of the container file citation. Always `container_file_citation`.
+ sig { returns(Symbol) }
+ attr_accessor :type
+
+ # A citation for a container file used to generate a model response.
+ sig do
+ params(
+ container_id: String,
+ end_index: Integer,
+ file_id: String,
+ start_index: Integer,
+ type: Symbol
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # The ID of the container file.
+ container_id:,
+ # The index of the last character of the container file citation in the message.
+ end_index:,
+ # The ID of the file.
+ file_id:,
+ # The index of the first character of the container file citation in the message.
+ start_index:,
+ # The type of the container file citation. Always `container_file_citation`.
+ type: :container_file_citation
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ container_id: String,
+ end_index: Integer,
+ file_id: String,
+ start_index: Integer,
+ type: Symbol
+ }
+ )
+ end
+ def to_hash
+ end
+ end
+
class FilePath < OpenAI::Internal::Type::BaseModel
OrHash =
T.type_alias do
diff --git a/rbi/openai/resources/chat/completions.rbi b/rbi/openai/resources/chat/completions.rbi
index a54d1c53..62e7196c 100644
--- a/rbi/openai/resources/chat/completions.rbi
+++ b/rbi/openai/resources/chat/completions.rbi
@@ -235,9 +235,9 @@ module OpenAI
# utilize scale tier credits until they are exhausted.
# - If set to 'auto', and the Project is not Scale tier enabled, the request will
# be processed using the default service tier with a lower uptime SLA and no
- # latency guarentee.
+ # latency guarantee.
# - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarentee.
+ # tier with a lower uptime SLA and no latency guarantee.
# - If set to 'flex', the request will be processed with the Flex Processing
# service tier.
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -522,9 +522,9 @@ module OpenAI
# utilize scale tier credits until they are exhausted.
# - If set to 'auto', and the Project is not Scale tier enabled, the request will
# be processed using the default service tier with a lower uptime SLA and no
- # latency guarentee.
+ # latency guarantee.
# - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarentee.
+ # tier with a lower uptime SLA and no latency guarantee.
# - If set to 'flex', the request will be processed with the Flex Processing
# service tier.
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
diff --git a/rbi/openai/resources/fine_tuning/alpha/graders.rbi b/rbi/openai/resources/fine_tuning/alpha/graders.rbi
index 8b1683c1..f3ea09fe 100644
--- a/rbi/openai/resources/fine_tuning/alpha/graders.rbi
+++ b/rbi/openai/resources/fine_tuning/alpha/graders.rbi
@@ -17,18 +17,23 @@ module OpenAI
OpenAI::Graders::MultiGrader::OrHash
),
model_sample: String,
- reference_answer:
- OpenAI::FineTuning::Alpha::GraderRunParams::ReferenceAnswer::Variants,
+ item: T.anything,
request_options: OpenAI::RequestOptions::OrHash
).returns(OpenAI::Models::FineTuning::Alpha::GraderRunResponse)
end
def run(
# The grader used for the fine-tuning job.
grader:,
- # The model sample to be evaluated.
+ # The model sample to be evaluated. This value will be used to populate the
+ # `sample` namespace. See
+ # [the guide](https://platform.openai.com/docs/guides/graders) for more details.
+ # The `output_json` variable will be populated if the model sample is a valid JSON
+ # string.
model_sample:,
- # The reference answer for the evaluation.
- reference_answer:,
+ # The dataset item provided to the grader. This will be used to populate the
+ # `item` namespace. See
+ # [the guide](https://platform.openai.com/docs/guides/graders) for more details.
+ item: nil,
request_options: {}
)
end
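Under the new interface sketched above, `reference_answer` is gone and an optional `item` hash feeds the `item` template namespace instead. A hedged example (the template variables and the `item` payload are illustrative, not taken from this diff):

```ruby
result = client.fine_tuning.alpha.graders.run(
  grader: {
    type: :string_check,
    name: "exactness",
    operation: :eq,
    input: "{{sample.output_text}}",       # populated from model_sample
    reference: "{{item.reference_answer}}" # populated from item
  },
  model_sample: "model_sample",
  item: {reference_answer: "model_sample"}
)
```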
See + # [the guide](https://platform.openai.com/docs/guides/graders) for more details. + item: nil, request_options: {} ) end diff --git a/rbi/openai/resources/images.rbi b/rbi/openai/resources/images.rbi index e3a93ec3..c8440e47 100644 --- a/rbi/openai/resources/images.rbi +++ b/rbi/openai/resources/images.rbi @@ -64,7 +64,7 @@ module OpenAI # The image(s) to edit. Must be a supported image file or an array of images. # # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than - # 25MB. You can provide up to 16 images. + # 50MB. You can provide up to 16 images. # # For `dall-e-2`, you can only provide one image, and it should be a square `png` # file less than 4MB. diff --git a/rbi/openai/resources/responses.rbi b/rbi/openai/resources/responses.rbi index bb55b332..c4ac4c2d 100644 --- a/rbi/openai/resources/responses.rbi +++ b/rbi/openai/resources/responses.rbi @@ -112,6 +112,8 @@ module OpenAI # multi-turn conversations when using the Responses API statelessly (like when # the `store` parameter is set to `false`, or when an organization is enrolled # in the zero data retention program). + # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. include: nil, # Inserts a system (or developer) message as the first item in the model's # context. @@ -149,9 +151,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -323,6 +325,8 @@ module OpenAI # multi-turn conversations when using the Responses API statelessly (like when # the `store` parameter is set to `false`, or when an organization is enrolled # in the zero data retention program). + # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. include: nil, # Inserts a system (or developer) message as the first item in the model's # context. @@ -360,9 +364,9 @@ module OpenAI # utilize scale tier credits until they are exhausted. # - If set to 'auto', and the Project is not Scale tier enabled, the request will # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. + # latency guarantee. # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. + # tier with a lower uptime SLA and no latency guarantee. # - If set to 'flex', the request will be processed with the Flex Processing # service tier. # [Learn more](https://platform.openai.com/docs/guides/flex-processing). @@ -507,7 +511,7 @@ module OpenAI params( response_id: String, request_options: OpenAI::RequestOptions::OrHash - ).void + ).returns(OpenAI::Responses::Response) end def cancel( # The ID of the response to cancel. 
diff --git a/sig/openai/models/audio/transcription_text_delta_event.rbs b/sig/openai/models/audio/transcription_text_delta_event.rbs
index 155b8e1d..08280006 100644
--- a/sig/openai/models/audio/transcription_text_delta_event.rbs
+++ b/sig/openai/models/audio/transcription_text_delta_event.rbs
@@ -31,16 +31,17 @@ module OpenAI
logprobs: ::Array[OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob]
}
- type logprob = { token: String, bytes: ::Array[top], logprob: Float }
+ type logprob =
+ { token: String, bytes: ::Array[Integer], logprob: Float }
class Logprob < OpenAI::Internal::Type::BaseModel
attr_reader token: String?
def token=: (String) -> String
- attr_reader bytes: ::Array[top]?
+ attr_reader bytes: ::Array[Integer]?
- def bytes=: (::Array[top]) -> ::Array[top]
+ def bytes=: (::Array[Integer]) -> ::Array[Integer]
attr_reader logprob: Float?
@@ -48,11 +49,15 @@ module OpenAI
def initialize: (
?token: String,
- ?bytes: ::Array[top],
+ ?bytes: ::Array[Integer],
?logprob: Float
) -> void
- def to_hash: -> { token: String, bytes: ::Array[top], logprob: Float }
+ def to_hash: -> {
+ token: String,
+ bytes: ::Array[Integer],
+ logprob: Float
+ }
end
end
end
diff --git a/sig/openai/models/audio/transcription_text_done_event.rbs b/sig/openai/models/audio/transcription_text_done_event.rbs
index d8f864e3..53e317e8 100644
--- a/sig/openai/models/audio/transcription_text_done_event.rbs
+++ b/sig/openai/models/audio/transcription_text_done_event.rbs
@@ -31,16 +31,17 @@ module OpenAI
logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob]
}
- type logprob = { token: String, bytes: ::Array[top], logprob: Float }
+ type logprob =
+ { token: String, bytes: ::Array[Integer], logprob: Float }
class Logprob < OpenAI::Internal::Type::BaseModel
attr_reader token: String?
def token=: (String) -> String
- attr_reader bytes: ::Array[top]?
+ attr_reader bytes: ::Array[Integer]?
- def bytes=: (::Array[top]) -> ::Array[top]
+ def bytes=: (::Array[Integer]) -> ::Array[Integer]
attr_reader logprob: Float?
@@ -48,11 +49,15 @@ module OpenAI
def initialize: (
?token: String,
- ?bytes: ::Array[top],
+ ?bytes: ::Array[Integer],
?logprob: Float
) -> void
- def to_hash: -> { token: String, bytes: ::Array[top], logprob: Float }
+ def to_hash: -> {
+ token: String,
+ bytes: ::Array[Integer],
+ logprob: Float
+ }
end
end
end
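With `bytes` now typed as `::Array[Integer]` instead of `::Array[top]`, the raw byte values can be reassembled into UTF-8 text without casting. A small sketch against a delta event's logprobs (`event` stands for an already-received `TranscriptionTextDeltaEvent`):

```ruby
event.logprobs.to_a.each do |lp|
  # Each entry is a list of byte values; pack them back into a UTF-8 string.
  token_text = lp.bytes.to_a.pack("C*").force_encoding(Encoding::UTF_8)
  printf("%-16s logprob=%.4f\n", token_text.inspect, lp.logprob)
end
```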
diff --git a/sig/openai/models/fine_tuning/alpha/grader_run_params.rbs b/sig/openai/models/fine_tuning/alpha/grader_run_params.rbs
index 0fcec2cc..64b909d5 100644
--- a/sig/openai/models/fine_tuning/alpha/grader_run_params.rbs
+++ b/sig/openai/models/fine_tuning/alpha/grader_run_params.rbs
@@ -6,7 +6,7 @@ module OpenAI
{
grader: OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader,
model_sample: String,
- reference_answer: OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer
+ item: top
}
& OpenAI::Internal::Type::request_parameters
@@ -18,19 +18,21 @@ module OpenAI
attr_accessor model_sample: String
- attr_accessor reference_answer: OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer
+ attr_reader item: top?
+
+ def item=: (top) -> top
def initialize: (
grader: OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader,
model_sample: String,
- reference_answer: OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer,
+ ?item: top,
?request_options: OpenAI::request_opts
) -> void
def to_hash: -> {
grader: OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader,
model_sample: String,
- reference_answer: OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer,
+ item: top,
request_options: OpenAI::RequestOptions
}
@@ -46,16 +48,6 @@ module OpenAI
def self?.variants: -> ::Array[OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader]
end
-
- type reference_answer = String | top | ::Array[top] | Float
-
- module ReferenceAnswer
- extend OpenAI::Internal::Type::Union
-
- def self?.variants: -> ::Array[OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer]
-
- UnionMember2Array: OpenAI::Internal::Type::Converter
- end
end
end
end
diff --git a/sig/openai/models/fine_tuning/fine_tuning_job.rbs b/sig/openai/models/fine_tuning/fine_tuning_job.rbs
index a5368cef..f2c2812d 100644
--- a/sig/openai/models/fine_tuning/fine_tuning_job.rbs
+++ b/sig/openai/models/fine_tuning/fine_tuning_job.rbs
@@ -165,7 +165,7 @@ module OpenAI
n_epochs: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::n_epochs
}
- type batch_size = (top | :auto | Integer)?
+ type batch_size = :auto | Integer
module BatchSize
extend OpenAI::Internal::Type::Union
diff --git a/sig/openai/models/graders/multi_grader.rbs b/sig/openai/models/graders/multi_grader.rbs
index 206d8144..d9ca09fc 100644
--- a/sig/openai/models/graders/multi_grader.rbs
+++ b/sig/openai/models/graders/multi_grader.rbs
@@ -6,7 +6,7 @@ module OpenAI
type multi_grader =
{
calculate_output: String,
- graders: ::Hash[Symbol, OpenAI::Models::Graders::MultiGrader::grader],
+ graders: OpenAI::Models::Graders::MultiGrader::graders,
name: String,
type: :multi
}
@@ -14,7 +14,7 @@ module OpenAI
class MultiGrader < OpenAI::Internal::Type::BaseModel
attr_accessor calculate_output: String
- attr_accessor graders: ::Hash[Symbol, OpenAI::Models::Graders::MultiGrader::grader]
+ attr_accessor graders: OpenAI::Models::Graders::MultiGrader::graders
attr_accessor name: String
@@ -22,29 +22,29 @@ module OpenAI
def initialize: (
calculate_output: String,
- graders: ::Hash[Symbol, OpenAI::Models::Graders::MultiGrader::grader],
+ graders: OpenAI::Models::Graders::MultiGrader::graders,
name: String,
?type: :multi
) -> void
def to_hash: -> {
calculate_output: String,
- graders: ::Hash[Symbol, OpenAI::Models::Graders::MultiGrader::grader],
+ graders: OpenAI::Models::Graders::MultiGrader::graders,
name: String,
type: :multi
}
- type grader =
+ type graders =
OpenAI::Graders::StringCheckGrader
| OpenAI::Graders::TextSimilarityGrader
| OpenAI::Graders::PythonGrader
| OpenAI::Graders::ScoreModelGrader
| OpenAI::Graders::LabelModelGrader
- module Grader
+ module Graders
extend OpenAI::Internal::Type::Union
- def self?.variants: -> ::Array[OpenAI::Models::Graders::MultiGrader::grader]
+ def self?.variants: -> ::Array[OpenAI::Models::Graders::MultiGrader::graders]
end
end
end
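Per the rbs change above, `MultiGrader#graders` is now typed as a single grader variant rather than a `::Hash` of them. A hedged construction sketch reflecting the new signature (the names and template variables are illustrative assumptions):

```ruby
multi = OpenAI::Graders::MultiGrader.new(
  name: "multi",
  calculate_output: "exactness", # references the sub-grader by name
  graders: OpenAI::Graders::StringCheckGrader.new(
    name: "exactness",
    operation: :eq,
    input: "{{sample.output_text}}",
    reference: "{{item.answer}}"
  )
)
```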
diff --git a/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs b/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs
index 2da40939..51d5e73d 100644
--- a/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs
+++ b/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs
@@ -6,7 +6,7 @@ module OpenAI
delta: String,
output_index: Integer,
sequence_number: Integer,
- type: :"response.code_interpreter_call.code.delta"
+ type: :"response.code_interpreter_call_code.delta"
}
class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::Internal::Type::BaseModel
@@ -16,20 +16,20 @@ module OpenAI
attr_accessor sequence_number: Integer
- attr_accessor type: :"response.code_interpreter_call.code.delta"
+ attr_accessor type: :"response.code_interpreter_call_code.delta"
def initialize: (
delta: String,
output_index: Integer,
sequence_number: Integer,
- ?type: :"response.code_interpreter_call.code.delta"
+ ?type: :"response.code_interpreter_call_code.delta"
) -> void
def to_hash: -> {
delta: String,
output_index: Integer,
sequence_number: Integer,
- type: :"response.code_interpreter_call.code.delta"
+ type: :"response.code_interpreter_call_code.delta"
}
end
end
diff --git a/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs b/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs
index 5f796490..a025e240 100644
--- a/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs
+++ b/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs
@@ -6,7 +6,7 @@ module OpenAI
code: String,
output_index: Integer,
sequence_number: Integer,
- type: :"response.code_interpreter_call.code.done"
+ type: :"response.code_interpreter_call_code.done"
}
class ResponseCodeInterpreterCallCodeDoneEvent < OpenAI::Internal::Type::BaseModel
@@ -16,20 +16,20 @@ module OpenAI
attr_accessor sequence_number: Integer
- attr_accessor type: :"response.code_interpreter_call.code.done"
+ attr_accessor type: :"response.code_interpreter_call_code.done"
def initialize: (
code: String,
output_index: Integer,
sequence_number: Integer,
- ?type: :"response.code_interpreter_call.code.done"
+ ?type: :"response.code_interpreter_call_code.done"
) -> void
def to_hash: -> {
code: String,
output_index: Integer,
sequence_number: Integer,
- type: :"response.code_interpreter_call.code.done"
+ type: :"response.code_interpreter_call_code.done"
}
end
end
diff --git a/sig/openai/models/responses/response_includable.rbs b/sig/openai/models/responses/response_includable.rbs
index 4f37a1b1..7d37f3af 100644
--- a/sig/openai/models/responses/response_includable.rbs
+++ b/sig/openai/models/responses/response_includable.rbs
@@ -6,6 +6,7 @@ module OpenAI
| :"message.input_image.image_url"
| :"computer_call_output.output.image_url"
| :"reasoning.encrypted_content"
+ | :"code_interpreter_call.outputs"
module ResponseIncludable
extend OpenAI::Internal::Type::Enum
@@ -14,6 +15,7 @@ module OpenAI
MESSAGE_INPUT_IMAGE_IMAGE_URL: :"message.input_image.image_url"
COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL: :"computer_call_output.output.image_url"
REASONING_ENCRYPTED_CONTENT: :"reasoning.encrypted_content"
+ CODE_INTERPRETER_CALL_OUTPUTS: :"code_interpreter_call.outputs"
def self?.values: -> ::Array[OpenAI::Models::Responses::response_includable]
end
diff --git a/sig/openai/models/responses/response_output_text.rbs b/sig/openai/models/responses/response_output_text.rbs
index d29dc9ec..a72f5df5 100644
--- a/sig/openai/models/responses/response_output_text.rbs
+++ b/sig/openai/models/responses/response_output_text.rbs
@@ -39,6 +39,7 @@ module OpenAI
type annotation =
OpenAI::Responses::ResponseOutputText::Annotation::FileCitation
| OpenAI::Responses::ResponseOutputText::Annotation::URLCitation
+ | OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation
| OpenAI::Responses::ResponseOutputText::Annotation::FilePath
module Annotation
@@ -104,6 +105,43 @@ module OpenAI
}
end
+ type container_file_citation =
+ {
+ container_id: String,
+ end_index: Integer,
+ file_id: String,
+ start_index: Integer,
+ type: :container_file_citation
+ }
+
+ class ContainerFileCitation < OpenAI::Internal::Type::BaseModel
+ attr_accessor container_id: String
+
+ attr_accessor end_index: Integer
+
+ attr_accessor file_id: String
+
+ attr_accessor start_index: Integer
+
+ attr_accessor type: :container_file_citation
+
+ def initialize: (
+ container_id: String,
+ end_index: Integer,
+ file_id: String,
+ start_index: Integer,
+ ?type: :container_file_citation
+ ) -> void
+
+ def to_hash: -> {
+ container_id: String,
+ end_index: Integer,
+ file_id: String,
+ start_index: Integer,
+ type: :container_file_citation
+ }
+ end
+
type file_path = { file_id: String, index: Integer, type: :file_path }
class FilePath < OpenAI::Internal::Type::BaseModel
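A consumer walking output text annotations can now encounter the `container_file_citation` variant added above. A sketch, assuming `response` is a Responses API result whose code interpreter run produced files:

```ruby
response.output.each do |item|
  next unless item.type == :message

  item.content.each do |part|
    next unless part.type == :output_text

    part.annotations.each do |annotation|
      next unless annotation.type == :container_file_citation

      puts "cites file #{annotation.file_id} in container #{annotation.container_id} " \
           "(characters #{annotation.start_index}-#{annotation.end_index})"
    end
  end
end
```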
diff --git a/sig/openai/resources/fine_tuning/alpha/graders.rbs b/sig/openai/resources/fine_tuning/alpha/graders.rbs
index a460c4c7..5283fadb 100644
--- a/sig/openai/resources/fine_tuning/alpha/graders.rbs
+++ b/sig/openai/resources/fine_tuning/alpha/graders.rbs
@@ -6,7 +6,7 @@ module OpenAI
def run: (
grader: OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader,
model_sample: String,
- reference_answer: OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer,
+ ?item: top,
?request_options: OpenAI::request_opts
) -> OpenAI::Models::FineTuning::Alpha::GraderRunResponse
diff --git a/sig/openai/resources/responses.rbs b/sig/openai/resources/responses.rbs
index ee118696..7db1e5e4 100644
--- a/sig/openai/resources/responses.rbs
+++ b/sig/openai/resources/responses.rbs
@@ -71,7 +71,7 @@ module OpenAI
def cancel: (
String response_id,
?request_options: OpenAI::request_opts
- ) -> nil
+ ) -> OpenAI::Responses::Response
def initialize: (client: OpenAI::Client) -> void
end
diff --git a/test/openai/resources/fine_tuning/alpha/graders_test.rb b/test/openai/resources/fine_tuning/alpha/graders_test.rb
index 7a1c620a..9e2f659c 100644
--- a/test/openai/resources/fine_tuning/alpha/graders_test.rb
+++ b/test/openai/resources/fine_tuning/alpha/graders_test.rb
@@ -7,8 +7,7 @@ def test_run_required_params
response =
@openai.fine_tuning.alpha.graders.run(
grader: {input: "input", name: "name", operation: :eq, reference: "reference", type: :string_check},
- model_sample: "model_sample",
- reference_answer: "string"
+ model_sample: "model_sample"
)
assert_pattern do
diff --git a/test/openai/resources/responses_test.rb b/test/openai/resources/responses_test.rb
index deb7c605..fe1e2ad5 100644
--- a/test/openai/resources/responses_test.rb
+++ b/test/openai/resources/responses_test.rb
@@ -89,7 +89,36 @@ def test_cancel
response = @openai.responses.cancel("resp_677efb5139a88190b512bc3fef8e535d")
assert_pattern do
- response => nil
+ response => OpenAI::Responses::Response
+ end
+
+ assert_pattern do
+ response => {
+ id: String,
+ created_at: Float,
+ error: OpenAI::Responses::ResponseError | nil,
+ incomplete_details: OpenAI::Responses::Response::IncompleteDetails | nil,
+ instructions: String | nil,
+ metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+ model: OpenAI::ResponsesModel,
+ object: Symbol,
+ output: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputItem]),
+ parallel_tool_calls: OpenAI::Internal::Type::Boolean,
+ temperature: Float | nil,
+ tool_choice: OpenAI::Responses::Response::ToolChoice,
+ tools: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool]),
+ top_p: Float | nil,
+ background: OpenAI::Internal::Type::Boolean | nil,
+ max_output_tokens: Integer | nil,
+ previous_response_id: String | nil,
+ reasoning: OpenAI::Reasoning | nil,
+ service_tier: OpenAI::Responses::Response::ServiceTier | nil,
+ status: OpenAI::Responses::ResponseStatus | nil,
+ text: OpenAI::Responses::ResponseTextConfig | nil,
+ truncation: OpenAI::Responses::Response::Truncation | nil,
+ usage: OpenAI::Responses::ResponseUsage | nil,
+ user: String | nil
+ }
end
end
end