Skip to content

Commit 24a9100

Browse files
fix(api): Fix evals and code interpreter interfaces
1 parent af420ee commit 24a9100

File tree

51 files changed

+461
-275
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

51 files changed

+461
-275
lines changed

.stats.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 109
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d4bcffecf0cdadf746faa6708ed1ec81fac451f9b857deabbab26f0a343b9314.yml
3-
openapi_spec_hash: 7c54a18b4381248bda7cc34c52142615
4-
config_hash: e618aa8ff61aea826540916336de65a6
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-2bcc845d8635bf93ddcf9ee723af4d7928248412a417bee5fc10d863a1e13867.yml
3+
openapi_spec_hash: 865230cb3abeb01bd85de05891af23c4
4+
config_hash: ed1e6b3c5f93d12b80d31167f55c557c

lib/openai/models/audio/transcription_text_delta_event.rb

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -50,8 +50,8 @@ class Logprob < OpenAI::Internal::Type::BaseModel
5050
# @!attribute bytes
5151
# The bytes that were used to generate the log probability.
5252
#
53-
# @return [Array<Object>, nil]
54-
optional :bytes, OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
53+
# @return [Array<Integer>, nil]
54+
optional :bytes, OpenAI::Internal::Type::ArrayOf[Integer]
5555

5656
# @!attribute logprob
5757
# The log probability of the token.
@@ -65,7 +65,7 @@ class Logprob < OpenAI::Internal::Type::BaseModel
6565
#
6666
# @param token [String] The token that was used to generate the log probability.
6767
#
68-
# @param bytes [Array<Object>] The bytes that were used to generate the log probability.
68+
# @param bytes [Array<Integer>] The bytes that were used to generate the log probability.
6969
#
7070
# @param logprob [Float] The log probability of the token.
7171
end

lib/openai/models/audio/transcription_text_done_event.rb

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -51,8 +51,8 @@ class Logprob < OpenAI::Internal::Type::BaseModel
5151
# @!attribute bytes
5252
# The bytes that were used to generate the log probability.
5353
#
54-
# @return [Array<Object>, nil]
55-
optional :bytes, OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
54+
# @return [Array<Integer>, nil]
55+
optional :bytes, OpenAI::Internal::Type::ArrayOf[Integer]
5656

5757
# @!attribute logprob
5858
# The log probability of the token.
@@ -66,7 +66,7 @@ class Logprob < OpenAI::Internal::Type::BaseModel
6666
#
6767
# @param token [String] The token that was used to generate the log probability.
6868
#
69-
# @param bytes [Array<Object>] The bytes that were used to generate the log probability.
69+
# @param bytes [Array<Integer>] The bytes that were used to generate the log probability.
7070
#
7171
# @param logprob [Float] The log probability of the token.
7272
end

lib/openai/models/chat/chat_completion.rb

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -46,9 +46,9 @@ class ChatCompletion < OpenAI::Internal::Type::BaseModel
4646
# utilize scale tier credits until they are exhausted.
4747
# - If set to 'auto', and the Project is not Scale tier enabled, the request will
4848
# be processed using the default service tier with a lower uptime SLA and no
49-
# latency guarentee.
49+
# latency guarantee.
5050
# - If set to 'default', the request will be processed using the default service
51-
# tier with a lower uptime SLA and no latency guarentee.
51+
# tier with a lower uptime SLA and no latency guarantee.
5252
# - If set to 'flex', the request will be processed with the Flex Processing
5353
# service tier.
5454
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -195,9 +195,9 @@ class Logprobs < OpenAI::Internal::Type::BaseModel
195195
# utilize scale tier credits until they are exhausted.
196196
# - If set to 'auto', and the Project is not Scale tier enabled, the request will
197197
# be processed using the default service tier with a lower uptime SLA and no
198-
# latency guarentee.
198+
# latency guarantee.
199199
# - If set to 'default', the request will be processed using the default service
200-
# tier with a lower uptime SLA and no latency guarentee.
200+
# tier with a lower uptime SLA and no latency guarantee.
201201
# - If set to 'flex', the request will be processed with the Flex Processing
202202
# service tier.
203203
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).

lib/openai/models/chat/chat_completion_chunk.rb

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -45,9 +45,9 @@ class ChatCompletionChunk < OpenAI::Internal::Type::BaseModel
4545
# utilize scale tier credits until they are exhausted.
4646
# - If set to 'auto', and the Project is not Scale tier enabled, the request will
4747
# be processed using the default service tier with a lower uptime SLA and no
48-
# latency guarentee.
48+
# latency guarantee.
4949
# - If set to 'default', the request will be processed using the default service
50-
# tier with a lower uptime SLA and no latency guarentee.
50+
# tier with a lower uptime SLA and no latency guarantee.
5151
# - If set to 'flex', the request will be processed with the Flex Processing
5252
# service tier.
5353
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -378,9 +378,9 @@ class Logprobs < OpenAI::Internal::Type::BaseModel
378378
# utilize scale tier credits until they are exhausted.
379379
# - If set to 'auto', and the Project is not Scale tier enabled, the request will
380380
# be processed using the default service tier with a lower uptime SLA and no
381-
# latency guarentee.
381+
# latency guarantee.
382382
# - If set to 'default', the request will be processed using the default service
383-
# tier with a lower uptime SLA and no latency guarentee.
383+
# tier with a lower uptime SLA and no latency guarantee.
384384
# - If set to 'flex', the request will be processed with the Flex Processing
385385
# service tier.
386386
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).

lib/openai/models/chat/completion_create_params.rb

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -226,9 +226,9 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
226226
# utilize scale tier credits until they are exhausted.
227227
# - If set to 'auto', and the Project is not Scale tier enabled, the request will
228228
# be processed using the default service tier with a lower uptime SLA and no
229-
# latency guarentee.
229+
# latency guarantee.
230230
# - If set to 'default', the request will be processed using the default service
231-
# tier with a lower uptime SLA and no latency guarentee.
231+
# tier with a lower uptime SLA and no latency guarantee.
232232
# - If set to 'flex', the request will be processed with the Flex Processing
233233
# service tier.
234234
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -553,9 +553,9 @@ module ResponseFormat
553553
# utilize scale tier credits until they are exhausted.
554554
# - If set to 'auto', and the Project is not Scale tier enabled, the request will
555555
# be processed using the default service tier with a lower uptime SLA and no
556-
# latency guarentee.
556+
# latency guarantee.
557557
# - If set to 'default', the request will be processed using the default service
558-
# tier with a lower uptime SLA and no latency guarentee.
558+
# tier with a lower uptime SLA and no latency guarantee.
559559
# - If set to 'flex', the request will be processed with the Flex Processing
560560
# service tier.
561561
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).

lib/openai/models/fine_tuning/alpha/grader_run_params.rb

Lines changed: 17 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -16,26 +16,32 @@ class GraderRunParams < OpenAI::Internal::Type::BaseModel
1616
required :grader, union: -> { OpenAI::FineTuning::Alpha::GraderRunParams::Grader }
1717

1818
# @!attribute model_sample
19-
# The model sample to be evaluated.
19+
# The model sample to be evaluated. This value will be used to populate the
20+
# `sample` namespace. See
21+
# [the guide](https://platform.openai.com/docs/guides/graders) for more details.
22+
# The `output_json` variable will be populated if the model sample is a valid JSON
23+
# string.
2024
#
2125
# @return [String]
2226
required :model_sample, String
2327

24-
# @!attribute reference_answer
25-
# The reference answer for the evaluation.
28+
# @!attribute item
29+
# The dataset item provided to the grader. This will be used to populate the
30+
# `item` namespace. See
31+
# [the guide](https://platform.openai.com/docs/guides/graders) for more details.
2632
#
27-
# @return [String, Object, Array<Object>, Float]
28-
required :reference_answer,
29-
union: -> {
30-
OpenAI::FineTuning::Alpha::GraderRunParams::ReferenceAnswer
31-
}
33+
# @return [Object, nil]
34+
optional :item, OpenAI::Internal::Type::Unknown
3235

33-
# @!method initialize(grader:, model_sample:, reference_answer:, request_options: {})
36+
# @!method initialize(grader:, model_sample:, item: nil, request_options: {})
37+
# Some parameter documentations has been truncated, see
38+
# {OpenAI::Models::FineTuning::Alpha::GraderRunParams} for more details.
39+
#
3440
# @param grader [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] The grader used for the fine-tuning job.
3541
#
36-
# @param model_sample [String] The model sample to be evaluated.
42+
# @param model_sample [String] The model sample to be evaluated. This value will be used to populate
3743
#
38-
# @param reference_answer [String, Object, Array<Object>, Float] The reference answer for the evaluation.
44+
# @param item [Object] The dataset item provided to the grader. This will be used to populate
3945
#
4046
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
4147

@@ -63,25 +69,6 @@ module Grader
6369
# @!method self.variants
6470
# @return [Array(OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader)]
6571
end
66-
67-
# The reference answer for the evaluation.
68-
module ReferenceAnswer
69-
extend OpenAI::Internal::Type::Union
70-
71-
variant String
72-
73-
variant OpenAI::Internal::Type::Unknown
74-
75-
variant -> { OpenAI::Models::FineTuning::Alpha::GraderRunParams::ReferenceAnswer::UnionMember2Array }
76-
77-
variant Float
78-
79-
# @!method self.variants
80-
# @return [Array(String, Object, Array<Object>, Float)]
81-
82-
# @type [OpenAI::Internal::Type::Converter]
83-
UnionMember2Array = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
84-
end
8572
end
8673
end
8774
end

lib/openai/models/fine_tuning/fine_tuning_job.rb

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -226,7 +226,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel
226226
# Number of examples in each batch. A larger batch size means that model
227227
# parameters are updated less frequently, but with lower variance.
228228
#
229-
# @return [Object, Symbol, :auto, Integer, nil]
229+
# @return [Symbol, :auto, Integer, nil]
230230
optional :batch_size,
231231
union: -> { OpenAI::FineTuning::FineTuningJob::Hyperparameters::BatchSize },
232232
nil?: true
@@ -253,7 +253,7 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel
253253
# The hyperparameters used for the fine-tuning job. This value will only be
254254
# returned when running `supervised` jobs.
255255
#
256-
# @param batch_size [Object, Symbol, :auto, Integer, nil] Number of examples in each batch. A larger batch size means that model parameter
256+
# @param batch_size [Symbol, :auto, Integer, nil] Number of examples in each batch. A larger batch size means that model parameter
257257
#
258258
# @param learning_rate_multiplier [Symbol, :auto, Float] Scaling factor for the learning rate. A smaller learning rate may be useful to a
259259
#
@@ -266,14 +266,12 @@ class Hyperparameters < OpenAI::Internal::Type::BaseModel
266266
module BatchSize
267267
extend OpenAI::Internal::Type::Union
268268

269-
variant OpenAI::Internal::Type::Unknown
270-
271269
variant const: :auto
272270

273271
variant Integer
274272

275273
# @!method self.variants
276-
# @return [Array(Object, Symbol, :auto, Integer)]
274+
# @return [Array(Symbol, :auto, Integer)]
277275
end
278276

279277
# Scaling factor for the learning rate. A smaller learning rate may be useful to

lib/openai/models/graders/multi_grader.rb

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,11 @@ class MultiGrader < OpenAI::Internal::Type::BaseModel
1111
required :calculate_output, String
1212

1313
# @!attribute graders
14+
# A StringCheckGrader object that performs a string comparison between input and
15+
# reference using a specified operation.
1416
#
15-
# @return [Hash{Symbol=>OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader}]
16-
required :graders, -> { OpenAI::Internal::Type::HashOf[union: OpenAI::Graders::MultiGrader::Grader] }
17+
# @return [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader]
18+
required :graders, union: -> { OpenAI::Graders::MultiGrader::Graders }
1719

1820
# @!attribute name
1921
# The name of the grader.
@@ -28,20 +30,25 @@ class MultiGrader < OpenAI::Internal::Type::BaseModel
2830
required :type, const: :multi
2931

3032
# @!method initialize(calculate_output:, graders:, name:, type: :multi)
33+
# Some parameter documentations has been truncated, see
34+
# {OpenAI::Models::Graders::MultiGrader} for more details.
35+
#
3136
# A MultiGrader object combines the output of multiple graders to produce a single
3237
# score.
3338
#
3439
# @param calculate_output [String] A formula to calculate the output based on grader results.
3540
#
36-
# @param graders [Hash{Symbol=>OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader}]
41+
# @param graders [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader] A StringCheckGrader object that performs a string comparison between input and r
3742
#
3843
# @param name [String] The name of the grader.
3944
#
4045
# @param type [Symbol, :multi] The object type, which is always `multi`.
4146

4247
# A StringCheckGrader object that performs a string comparison between input and
4348
# reference using a specified operation.
44-
module Grader
49+
#
50+
# @see OpenAI::Models::Graders::MultiGrader#graders
51+
module Graders
4552
extend OpenAI::Internal::Type::Union
4653

4754
# A StringCheckGrader object that performs a string comparison between input and reference using a specified operation.

lib/openai/models/image_edit_params.rb

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel
1111
# The image(s) to edit. Must be a supported image file or an array of images.
1212
#
1313
# For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
14-
# 25MB. You can provide up to 16 images.
14+
# 50MB. You can provide up to 16 images.
1515
#
1616
# For `dall-e-2`, you can only provide one image, and it should be a square `png`
1717
# file less than 4MB.
@@ -123,7 +123,7 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel
123123
# The image(s) to edit. Must be a supported image file or an array of images.
124124
#
125125
# For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
126-
# 25MB. You can provide up to 16 images.
126+
# 50MB. You can provide up to 16 images.
127127
#
128128
# For `dall-e-2`, you can only provide one image, and it should be a square `png`
129129
# file less than 4MB.

lib/openai/models/responses/response.rb

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -173,9 +173,9 @@ class Response < OpenAI::Internal::Type::BaseModel
173173
# utilize scale tier credits until they are exhausted.
174174
# - If set to 'auto', and the Project is not Scale tier enabled, the request will
175175
# be processed using the default service tier with a lower uptime SLA and no
176-
# latency guarentee.
176+
# latency guarantee.
177177
# - If set to 'default', the request will be processed using the default service
178-
# tier with a lower uptime SLA and no latency guarentee.
178+
# tier with a lower uptime SLA and no latency guarantee.
179179
# - If set to 'flex', the request will be processed with the Flex Processing
180180
# service tier.
181181
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -346,9 +346,9 @@ module ToolChoice
346346
# utilize scale tier credits until they are exhausted.
347347
# - If set to 'auto', and the Project is not Scale tier enabled, the request will
348348
# be processed using the default service tier with a lower uptime SLA and no
349-
# latency guarentee.
349+
# latency guarantee.
350350
# - If set to 'default', the request will be processed using the default service
351-
# tier with a lower uptime SLA and no latency guarentee.
351+
# tier with a lower uptime SLA and no latency guarantee.
352352
# - If set to 'flex', the request will be processed with the Flex Processing
353353
# service tier.
354354
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).

lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -23,12 +23,12 @@ class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::Internal::Type::BaseMo
2323
required :sequence_number, Integer
2424

2525
# @!attribute type
26-
# The type of the event. Always `response.code_interpreter_call.code.delta`.
26+
# The type of the event. Always `response.code_interpreter_call_code.delta`.
2727
#
28-
# @return [Symbol, :"response.code_interpreter_call.code.delta"]
29-
required :type, const: :"response.code_interpreter_call.code.delta"
28+
# @return [Symbol, :"response.code_interpreter_call_code.delta"]
29+
required :type, const: :"response.code_interpreter_call_code.delta"
3030

31-
# @!method initialize(delta:, output_index:, sequence_number:, type: :"response.code_interpreter_call.code.delta")
31+
# @!method initialize(delta:, output_index:, sequence_number:, type: :"response.code_interpreter_call_code.delta")
3232
# Some parameter documentations has been truncated, see
3333
# {OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent} for more
3434
# details.
@@ -41,7 +41,7 @@ class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::Internal::Type::BaseMo
4141
#
4242
# @param sequence_number [Integer] The sequence number of this event.
4343
#
44-
# @param type [Symbol, :"response.code_interpreter_call.code.delta"] The type of the event. Always `response.code_interpreter_call.code.delta`.
44+
# @param type [Symbol, :"response.code_interpreter_call_code.delta"] The type of the event. Always `response.code_interpreter_call_code.delta`.
4545
end
4646
end
4747
end

0 commit comments

Comments (0)