From 45045e9090e6d847a586e4ae9d4fda8f8ae691e2 Mon Sep 17 00:00:00 2001 From: Gerardo Lecaros <10088504+glecaros@users.noreply.github.com> Date: Tue, 4 Feb 2025 17:23:59 -0800 Subject: [PATCH] [azopenai] Updates for 2025-01-01-preview. (#24053) * [azopenai] Updates for 2025-01-01-preview. * Fixes + updated recordings. * Update sdk/ai/azopenai/CHANGELOG.md Co-authored-by: Richard Park <51494936+richardpark-msft@users.noreply.github.com> * prep release * rolling back devcontainer change --------- Co-authored-by: Richard Park <51494936+richardpark-msft@users.noreply.github.com> --- sdk/ai/azopenai/CHANGELOG.md | 35 ++- sdk/ai/azopenai/assets.json | 2 +- sdk/ai/azopenai/autorest.md | 34 +++ sdk/ai/azopenai/client.go | 54 ++-- sdk/ai/azopenai/constants.go | 90 +++++++ sdk/ai/azopenai/custom_client.go | 16 +- sdk/ai/azopenai/interfaces.go | 7 +- sdk/ai/azopenai/models.go | 215 +++++++++++++++- sdk/ai/azopenai/models_extra.go | 46 ++++ sdk/ai/azopenai/models_serde.go | 285 +++++++++++++++++++++ sdk/ai/azopenai/polymorphic_helpers.go | 2 + sdk/ai/azopenai/testdata/tsp-location.yaml | 2 +- 12 files changed, 745 insertions(+), 43 deletions(-) diff --git a/sdk/ai/azopenai/CHANGELOG.md b/sdk/ai/azopenai/CHANGELOG.md index 9a568c091f5f..38f3adc31ad9 100644 --- a/sdk/ai/azopenai/CHANGELOG.md +++ b/sdk/ai/azopenai/CHANGELOG.md @@ -1,14 +1,37 @@ # Release History -## 0.7.2 (Unreleased) +## 0.7.2 (2025-02-05) ### Features Added -### Breaking Changes - -### Bugs Fixed - -### Other Changes +- Updating to support Azure OpenAI API version `2025-01-01-preview`. +- Updated `ChatCompletionsOptions` and `ChatCompletionsStreamOptions`: + - Added `Audio` parameter. + - Added `Metadata` parameter. + - Added `Modalities` parameter. + - Added `Prediction` parameter. + - Added `ReasoningEffort` parameter. + - Added `Store` parameter. + - Added `UserSecurityContext` parameter. +- Added `Audio` field to `ChatResponseMessage` +- Added `AudioOutputParameters` type. +- Added `AudioResponseData` type. +- Updated `CompletionsUsageCompletionTokensDetails`: + - Added `AcceptedPredictionTokens` field. + - Added `AudioTokens` field. + - Added `RejectedPredictionTokens` field. +- Updated `CompletionsUsagePromptTokensDetails`: + - Added `AudioTokens` field. +- Added `InputAudioContent` type. +- Added `ChatRequestDeveloperMessage` type. +- Added `PredictionContent` type. +- Added `UserSecurityContext` type. +- Added `ChatMessageAudioContentItem` type. +- Added `ChatCompletionModality` enum. +- Added `ChatRoleDeveloper` to the `ChatRole` enum. +- Added `InputAudioFormat` enum. +- Added `OutputAudioFormat` enum. +- Added `ReasoningEffortValue` enum. ## 0.7.1 (2024-11-13) diff --git a/sdk/ai/azopenai/assets.json b/sdk/ai/azopenai/assets.json index 7fcd69511314..da9480a5e6e4 100644 --- a/sdk/ai/azopenai/assets.json +++ b/sdk/ai/azopenai/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/ai/azopenai", - "Tag": "go/ai/azopenai_ce36c649fa" + "Tag": "go/ai/azopenai_0efed71611" } diff --git a/sdk/ai/azopenai/autorest.md b/sdk/ai/azopenai/autorest.md index ea643ebc89d1..6764ea659f9e 100644 --- a/sdk/ai/azopenai/autorest.md +++ b/sdk/ai/azopenai/autorest.md @@ -753,6 +753,23 @@ directive: transform: $["$ref"] = "#/definitions/ChatRequestSystemMessageContent"; return $; ``` +Update ChatRequestDeveloperMessage.content to use its custom type. 
+ +```yaml +directive: + - from: swagger-document + where: $.definitions + transform: | + $["ChatRequestDeveloperMessageContent"] = { + "x-ms-external": true, + "type": "object", "properties": { "stub": { "type": "string" }} + }; + return $; + - from: swagger-document + where: $.definitions.ChatRequestDeveloperMessage.properties.content + transform: $["$ref"] = "#/definitions/ChatRequestDeveloperMessageContent"; return $; +``` + Update ChatRequestToolMessage.content to use its custom type. ```yaml @@ -787,6 +804,23 @@ directive: transform: $["$ref"] = "#/definitions/MongoDBChatExtensionParametersEmbeddingDependency"; return $; ``` +Update PredictionContent.content to use its custom type. + +```yaml +directive: + - from: swagger-document + where: $.definitions + transform: | + $["PredictionContentContent"] = { + "x-ms-external": true, + "type": "object", "properties": { "stub": { "type": "string" }} + }; + return $; + - from: swagger-document + where: $.definitions.PredictionContent.properties.content + transform: $["$ref"] = "#/definitions/PredictionContentContent"; return $; +``` + \*ChatCompletionsToolChoice ```yaml diff --git a/sdk/ai/azopenai/client.go b/sdk/ai/azopenai/client.go index 29855e23bfba..09abb738e7b6 100644 --- a/sdk/ai/azopenai/client.go +++ b/sdk/ai/azopenai/client.go @@ -35,7 +35,7 @@ type Client struct { // Upload. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - uploadID - The ID of the upload associated with this operation. // - data - The chunk of bytes for this Part. // - options - AddUploadPartOptions contains the optional parameters for the Client.AddUploadPart method. @@ -89,7 +89,7 @@ func (client *Client) addUploadPartHandleResponse(resp *http.Response) (AddUploa // CancelBatch - Gets details for a single batch specified by the given batchID. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - batchID - The identifier of the batch. // - options - CancelBatchOptions contains the optional parameters for the Client.CancelBatch method. func (client *Client) CancelBatch(ctx context.Context, batchID string, options *CancelBatchOptions) (CancelBatchResponse, error) { @@ -137,7 +137,7 @@ func (client *Client) cancelBatchHandleResponse(resp *http.Response) (CancelBatc // CancelUpload - Cancels the Upload. No Parts may be added after an Upload is cancelled. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - uploadID - The ID of the upload associated with this operation. // - options - CancelUploadOptions contains the optional parameters for the Client.CancelUpload method. func (client *Client) CancelUpload(ctx context.Context, uploadID string, options *CancelUploadOptions) (CancelUploadResponse, error) { @@ -189,7 +189,7 @@ func (client *Client) cancelUploadHandleResponse(resp *http.Response) (CancelUpl // object. No Parts may be added after an Upload is completed. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - uploadID - The ID of the upload associated with this operation. // - requestBody - The request body for the completion operation. 
// - options - CompleteUploadOptions contains the optional parameters for the Client.CompleteUpload method. @@ -242,7 +242,7 @@ func (client *Client) completeUploadHandleResponse(resp *http.Response) (Complet // job including job status. The ID of the result file is added to the response once complete. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - createBatchRequest - The specification of the batch to create and execute. // - options - CreateBatchOptions contains the optional parameters for the Client.CreateBatch method. func (client *Client) CreateBatch(ctx context.Context, createBatchRequest BatchCreateRequest, options *CreateBatchOptions) (CreateBatchResponse, error) { @@ -295,7 +295,7 @@ func (client *Client) createBatchHandleResponse(resp *http.Response) (CreateBatc // For guidance on the proper filename extensions for each purpose, please follow the documentation on creating a File. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - requestBody - The request body for the operation options. // - options - CreateUploadOptions contains the optional parameters for the Client.CreateUpload method. func (client *Client) CreateUpload(ctx context.Context, requestBody CreateUploadRequest, options *CreateUploadOptions) (CreateUploadResponse, error) { @@ -342,7 +342,7 @@ func (client *Client) createUploadHandleResponse(resp *http.Response) (CreateUpl // DeleteFile - Delete a previously uploaded file. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - fileID - The ID of the file to delete. // - options - DeleteFileOptions contains the optional parameters for the Client.DeleteFile method. func (client *Client) DeleteFile(ctx context.Context, fileID string, options *DeleteFileOptions) (DeleteFileResponse, error) { @@ -390,7 +390,7 @@ func (client *Client) deleteFileHandleResponse(resp *http.Response) (DeleteFileR // GenerateSpeechFromText - Generates text-to-speech audio from the input text. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - body - A representation of the request options that control the behavior of a text-to-speech operation. // - options - GenerateSpeechFromTextOptions contains the optional parameters for the Client.GenerateSpeechFromText method. func (client *Client) GenerateSpeechFromText(ctx context.Context, body SpeechGenerationOptions, options *GenerateSpeechFromTextOptions) (GenerateSpeechFromTextResponse, error) { @@ -418,7 +418,7 @@ func (client *Client) generateSpeechFromTextCreateRequest(ctx context.Context, b return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-10-01-preview") + reqQP.Set("api-version", "2025-01-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) req.Raw().Header["Accept"] = []string{"application/octet-stream, application/json"} @@ -432,7 +432,7 @@ func (client *Client) generateSpeechFromTextCreateRequest(ctx context.Context, b // be transcribed in the written language corresponding to the language it was spoken in. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - deploymentID - Specifies either the model deployment name (when using Azure OpenAI) or model name (when using non-Azure // OpenAI) to use for this request. // - file - The audio data to transcribe. This must be the binary content of a file in one of the supported media formats: flac, @@ -465,7 +465,7 @@ func (client *Client) getAudioTranscriptionInternalCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-10-01-preview") + reqQP.Set("api-version", "2025-01-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := setMultipartFormData(req, file, *body); err != nil { @@ -487,7 +487,7 @@ func (client *Client) getAudioTranscriptionInternalHandleResponse(resp *http.Res // data. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - deploymentID - Specifies either the model deployment name (when using Azure OpenAI) or model name (when using non-Azure // OpenAI) to use for this request. // - file - The audio data to translate. This must be the binary content of a file in one of the supported media formats: flac, @@ -520,7 +520,7 @@ func (client *Client) getAudioTranslationInternalCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-10-01-preview") + reqQP.Set("api-version", "2025-01-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := setMultipartFormData(req, file, *body); err != nil { @@ -541,7 +541,7 @@ func (client *Client) getAudioTranslationInternalHandleResponse(resp *http.Respo // GetBatch - Gets details for a single batch specified by the given batchID. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - batchID - The identifier of the batch. // - options - GetBatchOptions contains the optional parameters for the Client.GetBatch method. func (client *Client) GetBatch(ctx context.Context, batchID string, options *GetBatchOptions) (GetBatchResponse, error) { @@ -590,7 +590,7 @@ func (client *Client) getBatchHandleResponse(resp *http.Response) (GetBatchRespo // and generate text that continues from or "completes" provided prompt data. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - body - The configuration information for a chat completions request. Completions support a wide variety of tasks and generate // text that continues from or "completes" provided prompt data. // - options - GetChatCompletionsOptions contains the optional parameters for the Client.getChatCompletions method. 
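
As an aside, not part of the generated diff: a minimal sketch of how a caller might exercise the new 2025-01-01-preview chat-completions options introduced by this patch (developer messages, reasoning effort, store/metadata, and the user security context). The endpoint, deployment name, and metadata values below are placeholders, not values from this change.

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}

	// Placeholder endpoint; use your Azure OpenAI resource endpoint.
	client, err := azopenai.NewClient("https://<your-resource>.openai.azure.com", cred, nil)
	if err != nil {
		panic(err)
	}

	resp, err := client.GetChatCompletions(context.TODO(), azopenai.ChatCompletionsOptions{
		// Placeholder: an o1-family deployment, since developer messages and
		// reasoning_effort only apply to those models.
		DeploymentName: to.Ptr("<o1-deployment>"),
		Messages: []azopenai.ChatRequestMessageClassification{
			// New in this release: for o1 models and newer, developer messages
			// replace the previous system messages.
			&azopenai.ChatRequestDeveloperMessage{
				Content: azopenai.NewChatRequestDeveloperMessageContent("Answer as concisely as possible."),
			},
			&azopenai.ChatRequestUserMessage{
				Content: azopenai.NewChatRequestUserMessageContent("Explain the difference between a mutex and a semaphore in one sentence."),
			},
		},
		// New 2025-01-01-preview options added by this patch.
		ReasoningEffort: to.Ptr(azopenai.ReasoningEffortValueLow),
		Store:           to.Ptr(true),
		Metadata:        map[string]*string{"scenario": to.Ptr("example")},
		UserSecurityContext: &azopenai.UserSecurityContext{
			ApplicationName: to.Ptr("example-app"),
			SourceIP:        to.Ptr("203.0.113.7"),
		},
	}, nil)
	if err != nil {
		panic(err)
	}

	fmt.Println(*resp.Choices[0].Message.Content)
}
```

On the wire, these fields are serialized by the `chatCompletionsOptions` marshaller updated later in this patch.
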
@@ -620,7 +620,7 @@ func (client *Client) getChatCompletionsCreateRequest(ctx context.Context, body return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-10-01-preview") + reqQP.Set("api-version", "2025-01-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, body); err != nil { @@ -642,7 +642,7 @@ func (client *Client) getChatCompletionsHandleResponse(resp *http.Response) (Get // text that continues from or "completes" provided prompt data. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - body - The configuration information for a completions request. Completions support a wide variety of tasks and generate // text that continues from or "completes" provided prompt data. // - options - GetCompletionsOptions contains the optional parameters for the Client.getCompletions method. @@ -672,7 +672,7 @@ func (client *Client) getCompletionsCreateRequest(ctx context.Context, body comp return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-10-01-preview") + reqQP.Set("api-version", "2025-01-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, body); err != nil { @@ -693,7 +693,7 @@ func (client *Client) getCompletionsHandleResponse(resp *http.Response) (GetComp // GetEmbeddings - Return the embeddings for a given prompt. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - body - The configuration information for an embeddings request. Embeddings measure the relatedness of text strings and // are commonly used for search, clustering, recommendations, and other similar scenarios. // - options - GetEmbeddingsOptions contains the optional parameters for the Client.GetEmbeddings method. @@ -723,7 +723,7 @@ func (client *Client) getEmbeddingsCreateRequest(ctx context.Context, body Embed return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-10-01-preview") + reqQP.Set("api-version", "2025-01-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, body); err != nil { @@ -744,7 +744,7 @@ func (client *Client) getEmbeddingsHandleResponse(resp *http.Response) (GetEmbed // GetFile - Returns information about a specific file. Does not retrieve file content. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - fileID - The ID of the file to retrieve. // - options - GetFileOptions contains the optional parameters for the Client.GetFile method. func (client *Client) GetFile(ctx context.Context, fileID string, options *GetFileOptions) (GetFileResponse, error) { @@ -792,7 +792,7 @@ func (client *Client) getFileHandleResponse(resp *http.Response) (GetFileRespons // GetFileContent - Returns information about a specific file. Does not retrieve file content. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - fileID - The ID of the file to retrieve. 
// - options - GetFileContentOptions contains the optional parameters for the Client.GetFileContent method. func (client *Client) GetFileContent(ctx context.Context, fileID string, options *GetFileContentOptions) (GetFileContentResponse, error) { @@ -840,7 +840,7 @@ func (client *Client) getFileContentHandleResponse(resp *http.Response) (GetFile // GetImageGenerations - Creates an image given a prompt. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - body - Represents the request data used to generate images. // - options - GetImageGenerationsOptions contains the optional parameters for the Client.GetImageGenerations method. func (client *Client) GetImageGenerations(ctx context.Context, body ImageGenerationOptions, options *GetImageGenerationsOptions) (GetImageGenerationsResponse, error) { @@ -869,7 +869,7 @@ func (client *Client) getImageGenerationsCreateRequest(ctx context.Context, body return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-10-01-preview") + reqQP.Set("api-version", "2025-01-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, body); err != nil { @@ -890,7 +890,7 @@ func (client *Client) getImageGenerationsHandleResponse(resp *http.Response) (Ge // listBatches - Gets a list of all batches owned by the Azure OpenAI resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - options - ListBatchesOptions contains the optional parameters for the Client.listBatches method. func (client *Client) listBatches(ctx context.Context, options *ListBatchesOptions) (ListBatchesResponse, error) { var err error @@ -941,7 +941,7 @@ func (client *Client) listBatchesHandleResponse(resp *http.Response) (ListBatche // ListFiles - Gets a list of previously uploaded files. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - options - ListFilesOptions contains the optional parameters for the Client.ListFiles method. func (client *Client) ListFiles(ctx context.Context, options *ListFilesOptions) (ListFilesResponse, error) { var err error @@ -989,7 +989,7 @@ func (client *Client) listFilesHandleResponse(resp *http.Response) (ListFilesRes // UploadFile - Uploads a file for use by other operations. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-10-01-preview +// Generated from API version 2025-01-01-preview // - file - The file data (not filename) to upload. // - purpose - The intended purpose of the file. // - options - UploadFileOptions contains the optional parameters for the Client.UploadFile method. diff --git a/sdk/ai/azopenai/constants.go b/sdk/ai/azopenai/constants.go index 4cb181f364ff..245bde3fae18 100644 --- a/sdk/ai/azopenai/constants.go +++ b/sdk/ai/azopenai/constants.go @@ -222,6 +222,24 @@ func PossibleBatchStatusValues() []BatchStatus { } } +// ChatCompletionModality - Values to specified the required modality for the model to use. +type ChatCompletionModality string + +const ( + // ChatCompletionModalityAudio - The model is to generate audio output. 
+ ChatCompletionModalityAudio ChatCompletionModality = "audio" + // ChatCompletionModalityText - The model is to generate text output. + ChatCompletionModalityText ChatCompletionModality = "text" +) + +// PossibleChatCompletionModalityValues returns the possible values for the ChatCompletionModality const type. +func PossibleChatCompletionModalityValues() []ChatCompletionModality { + return []ChatCompletionModality{ + ChatCompletionModalityAudio, + ChatCompletionModalityText, + } +} + // ChatCompletionRequestMessageContentPartImageURLDetail - Specifies the detail level of the image. Learn more in the Vision // guide [/docs/guides/vision/low-or-high-fidelity-image-understanding]. type ChatCompletionRequestMessageContentPartImageURLDetail string @@ -317,6 +335,8 @@ type ChatRole string const ( // ChatRoleAssistant - The role that provides responses to system-instructed, user-prompted input. ChatRoleAssistant ChatRole = "assistant" + // ChatRoleDeveloper - The role that provides instructions that the model should follow + ChatRoleDeveloper ChatRole = "developer" // ChatRoleFunction - The role that provides function results for chat completions. ChatRoleFunction ChatRole = "function" // ChatRoleSystem - The role that instructs or sets the behavior of the assistant. @@ -331,6 +351,7 @@ const ( func PossibleChatRoleValues() []ChatRole { return []ChatRole{ ChatRoleAssistant, + ChatRoleDeveloper, ChatRoleFunction, ChatRoleSystem, ChatRoleTool, @@ -647,6 +668,24 @@ func PossibleImageSizeValues() []ImageSize { } } +// InputAudioFormat - Values to describe the format of the input audio data. +type InputAudioFormat string + +const ( + // InputAudioFormatMp3 - Specifies that the audio data is in the MP3 format. + InputAudioFormatMp3 InputAudioFormat = "mp3" + // InputAudioFormatWav - Specifies that the audio data is in the WAV format. + InputAudioFormatWav InputAudioFormat = "wav" +) + +// PossibleInputAudioFormatValues returns the possible values for the InputAudioFormat const type. +func PossibleInputAudioFormatValues() []InputAudioFormat { + return []InputAudioFormat{ + InputAudioFormatMp3, + InputAudioFormatWav, + } +} + // OnYourDataAuthenticationType - The authentication types supported with Azure OpenAI On Your Data. type OnYourDataAuthenticationType string @@ -752,6 +791,57 @@ func PossibleOnYourDataVectorizationSourceTypeValues() []OnYourDataVectorization } } +// OutputAudioFormat - The output audio format. +type OutputAudioFormat string + +const ( + // OutputAudioFormatFlac - The output audio format is FLAC. + OutputAudioFormatFlac OutputAudioFormat = "flac" + // OutputAudioFormatMp3 - The output audio format is MP3. + OutputAudioFormatMp3 OutputAudioFormat = "mp3" + // OutputAudioFormatOpus - The output audio format is OPUS. + OutputAudioFormatOpus OutputAudioFormat = "opus" + // OutputAudioFormatPcm16 - The output audio format is PCM16. + OutputAudioFormatPcm16 OutputAudioFormat = "pcm16" + // OutputAudioFormatWav - The output audio format is WAV. + OutputAudioFormatWav OutputAudioFormat = "wav" +) + +// PossibleOutputAudioFormatValues returns the possible values for the OutputAudioFormat const type. 
+func PossibleOutputAudioFormatValues() []OutputAudioFormat { + return []OutputAudioFormat{ + OutputAudioFormatFlac, + OutputAudioFormatMp3, + OutputAudioFormatOpus, + OutputAudioFormatPcm16, + OutputAudioFormatWav, + } +} + +// ReasoningEffortValue - This option is only valid for o1 models, +// Constrains effort on reasoning for reasoning models (see https://platform.openai.com/docs/guides/reasoning). +// Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer +// tokens used on reasoning in a response. +type ReasoningEffortValue string + +const ( + // ReasoningEffortValueHigh - The reasoning effort is high. + ReasoningEffortValueHigh ReasoningEffortValue = "high" + // ReasoningEffortValueLow - The reasoning effort is low. + ReasoningEffortValueLow ReasoningEffortValue = "low" + // ReasoningEffortValueMedium - The reasoning effort is medium. + ReasoningEffortValueMedium ReasoningEffortValue = "medium" +) + +// PossibleReasoningEffortValueValues returns the possible values for the ReasoningEffortValue const type. +func PossibleReasoningEffortValueValues() []ReasoningEffortValue { + return []ReasoningEffortValue{ + ReasoningEffortValueHigh, + ReasoningEffortValueLow, + ReasoningEffortValueMedium, + } +} + // SpeechGenerationResponseFormat - The audio output format for the spoken text. By default, the MP3 format will be used. type SpeechGenerationResponseFormat string diff --git a/sdk/ai/azopenai/custom_client.go b/sdk/ai/azopenai/custom_client.go index 3e5183c3869d..7f3a9649da15 100644 --- a/sdk/ai/azopenai/custom_client.go +++ b/sdk/ai/azopenai/custom_client.go @@ -34,7 +34,7 @@ type ClientOptions struct { azcore.ClientOptions } -const apiVersion = "2024-10-01-preview" +const apiVersion = "2025-01-01-preview" // NewClient creates a new instance of Client that connects to an Azure OpenAI endpoint. // - endpoint - Azure OpenAI service endpoint, for example: https://{your-resource-name}.openai.azure.com @@ -195,6 +195,13 @@ func (options *ChatCompletionsOptions) toWireType() chatCompletionsOptions { return chatCompletionsOptions{ Stream: to.Ptr(false), StreamOptions: nil, + Audio: options.Audio, + Metadata: options.Metadata, + Modalities: options.Modalities, + Prediction: options.Prediction, + ReasoningEffort: options.ReasoningEffort, + Store: options.Store, + UserSecurityContext: options.UserSecurityContext, Messages: options.Messages, AzureExtensionsOptions: options.AzureExtensionsOptions, Enhancements: options.Enhancements, @@ -225,6 +232,13 @@ func (options *ChatCompletionsStreamOptions) toWireType() chatCompletionsOptions return chatCompletionsOptions{ Stream: to.Ptr(true), StreamOptions: options.StreamOptions, + Audio: options.Audio, + Metadata: options.Metadata, + Modalities: options.Modalities, + Prediction: options.Prediction, + ReasoningEffort: options.ReasoningEffort, + Store: options.Store, + UserSecurityContext: options.UserSecurityContext, Messages: options.Messages, AzureExtensionsOptions: options.AzureExtensionsOptions, Enhancements: options.Enhancements, diff --git a/sdk/ai/azopenai/interfaces.go b/sdk/ai/azopenai/interfaces.go index a97fcbe6ebc2..96cb16f73f17 100644 --- a/sdk/ai/azopenai/interfaces.go +++ b/sdk/ai/azopenai/interfaces.go @@ -75,7 +75,8 @@ type ChatFinishDetailsClassification interface { // ChatMessageContentItemClassification provides polymorphic access to related types. // Call the interface's GetChatMessageContentItem() method to access the common type. 
// Use a type switch to determine the concrete type. The possible types are: -// - *ChatMessageContentItem, *ChatMessageImageContentItem, *ChatMessageRefusalContentItem, *ChatMessageTextContentItem +// - *ChatMessageAudioContentItem, *ChatMessageContentItem, *ChatMessageImageContentItem, *ChatMessageRefusalContentItem, +// - *ChatMessageTextContentItem type ChatMessageContentItemClassification interface { // GetChatMessageContentItem returns the ChatMessageContentItem content of the underlying type. GetChatMessageContentItem() *ChatMessageContentItem @@ -84,8 +85,8 @@ type ChatMessageContentItemClassification interface { // ChatRequestMessageClassification provides polymorphic access to related types. // Call the interface's GetChatRequestMessage() method to access the common type. // Use a type switch to determine the concrete type. The possible types are: -// - *ChatRequestAssistantMessage, *ChatRequestFunctionMessage, *ChatRequestMessage, *ChatRequestSystemMessage, *ChatRequestToolMessage, -// - *ChatRequestUserMessage +// - *ChatRequestAssistantMessage, *ChatRequestDeveloperMessage, *ChatRequestFunctionMessage, *ChatRequestMessage, *ChatRequestSystemMessage, +// - *ChatRequestToolMessage, *ChatRequestUserMessage type ChatRequestMessageClassification interface { // GetChatRequestMessage returns the ChatRequestMessage content of the underlying type. GetChatRequestMessage() *ChatRequestMessage diff --git a/sdk/ai/azopenai/models.go b/sdk/ai/azopenai/models.go index f9b6eca17411..90cdee3bb341 100644 --- a/sdk/ai/azopenai/models.go +++ b/sdk/ai/azopenai/models.go @@ -18,6 +18,31 @@ type AddUploadPartRequest struct { Data []byte } +// AudioOutputParameters - Describes the parameters for audio output. +type AudioOutputParameters struct { + // REQUIRED; Specifies the output audio format. + Format *OutputAudioFormat + + // REQUIRED; Specifies the voice type. + Voice *SpeechVoice +} + +// AudioResponseData - Object containing audio response data and its metadata. +type AudioResponseData struct { + // REQUIRED; Base64 encoded audio bytes generated by the model, in the format specified in the request. + Data *string + + // REQUIRED; The Unix timestamp (in seconds) for when this audio response will no longer be accessible on the server for use + // in multi-turn conversations. + ExpiresAt *time.Time + + // REQUIRED; Unique identifier for this audio response. + ID *string + + // REQUIRED; Transcript of the audio generated by the model. + Transcript *string +} + // AudioTranscription - Result information for an operation that transcribed spoken audio into written text. type AudioTranscription struct { // REQUIRED; The transcribed text for the provided audio data. @@ -1051,6 +1076,9 @@ type chatCompletionsOptions struct { // assistant, followed by alternating messages between the User and Assistant roles. Messages []ChatRequestMessageClassification + // Parameters for audio output. Required when audio output is requested with modalities: ["audio"] + Audio *AudioOutputParameters + // The configuration entries for Azure OpenAI chat extensions that use them. This additional specification is only compatible // with Azure OpenAI. AzureExtensionsOptions []AzureChatExtensionConfigurationClassification @@ -1094,6 +1122,14 @@ type chatCompletionsOptions struct { // This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models. MaxTokens *int32 + // Developer-defined tags and values used for filtering completions in the stored completions dashboard. 
+ Metadata map[string]*string + + // Output types that you would like the model to generate for this request. Most models are capable of generating text, which + // is the default: ["text"]The gpt-4o-audio-preview model can also be used to + // generate audio. To request that this model generate both text and audio responses, you can use: ["text", "audio"] + Modalities []ChatCompletionModality + // The model name to provide as part of this completions request. Not applicable to Azure OpenAI, where deployment information // should be included in the Azure resource URI that's connected to. DeploymentName *string @@ -1106,11 +1142,22 @@ type chatCompletionsOptions struct { // Whether to enable parallel function calling during tool use. ParallelToolCalls *bool + // Configuration for a Predicted Output, which can greatly improve response times when large parts of the model response are + // known ahead of time. This is most common when you are regenerating a file with + // only minor changes to most of the content. + Prediction *PredictionContent + // A value that influences the probability of generated tokens appearing based on their existing presence in generated text. // Positive values will make tokens less likely to appear when they already exist // and increase the model's likelihood to output new topics. PresencePenalty *float32 + // This option is only valid for o1 models, + // Constrains effort on reasoning for reasoning models (see https://platform.openai.com/docs/guides/reasoning). + // Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer + // tokens used on reasoning in a response. + ReasoningEffort *ReasoningEffortValue + // An object specifying the format that the model must output. Used to enable JSON mode. ResponseFormat ChatCompletionsResponseFormatClassification @@ -1122,6 +1169,9 @@ type chatCompletionsOptions struct { // A collection of textual sequences that will end completions generation. Stop []string + // Whether or not to store the output of this chat completion request for use in our model distillation or evaluation products. + Store *bool + // A value indicating whether chat completions should be streamed for this request. Stream *bool @@ -1154,6 +1204,10 @@ type chatCompletionsOptions struct { // An identifier for the caller or end user of the operation. This may be used for tracking or rate-limiting purposes. User *string + + // The security context identifies and authenticates users and applications in your multi-tenant AI system, helping security + // teams investigate and mitigate incidents. + UserSecurityContext *UserSecurityContext } // ChatCompletionsResponseFormat - An abstract representation of a response format configuration usable by Chat Completions. @@ -1216,6 +1270,22 @@ type ChatFinishDetails struct { // GetChatFinishDetails implements the ChatFinishDetailsClassification interface for type ChatFinishDetails. func (c *ChatFinishDetails) GetChatFinishDetails() *ChatFinishDetails { return c } +// ChatMessageAudioContentItem - A structured chat content item containing audio data. +type ChatMessageAudioContentItem struct { + // REQUIRED; The audio data. + InputAudio *InputAudioContent + + // REQUIRED; The discriminated object type. + Type *string +} + +// GetChatMessageContentItem implements the ChatMessageContentItemClassification interface for type ChatMessageAudioContentItem. 
+func (c *ChatMessageAudioContentItem) GetChatMessageContentItem() *ChatMessageContentItem { + return &ChatMessageContentItem{ + Type: c.Type, + } +} + // ChatMessageContentItem - An abstract representation of a structured content item within a chat message. type ChatMessageContentItem struct { // REQUIRED; The discriminated object type. @@ -1313,6 +1383,27 @@ func (c *ChatRequestAssistantMessage) GetChatRequestMessage() *ChatRequestMessag } } +// ChatRequestDeveloperMessage - Developer-provided instructions that the model should follow, regardless of messages sent +// by the user. With o1 models and newer, developer messages replace the previous system messages." +type ChatRequestDeveloperMessage struct { + // REQUIRED; An array of content parts with a defined type. For developer messages, only type text is supported. + Content *ChatRequestDeveloperMessageContent + + // REQUIRED; The chat role associated with this message. + role *ChatRole + + // An optional name for the participant. Provides the model information to differentiate between participants of the same + // role. + Name *string +} + +// GetChatRequestMessage implements the ChatRequestMessageClassification interface for type ChatRequestDeveloperMessage. +func (c *ChatRequestDeveloperMessage) GetChatRequestMessage() *ChatRequestMessage { + return &ChatRequestMessage{ + role: c.role, + } +} + // ChatRequestFunctionMessage - A request chat message representing requested output from a configured function. type ChatRequestFunctionMessage struct { // REQUIRED; The output of the function as requested by the function call. @@ -1410,6 +1501,9 @@ type ChatResponseMessage struct { // REQUIRED; The chat role associated with the message. Role *ChatRole + // If the audio output modality is requested, this object contains data about the audio response from the model. + Audio *AudioResponseData + // If Azure OpenAI chat extensions are configured, this array represents the incremental steps performed by those extensions // while processing the chat completions request. Context *AzureChatExtensionsMessageContext @@ -1648,13 +1742,27 @@ type CompletionsUsage struct { // CompletionsUsageCompletionTokensDetails - Breakdown of tokens used in a completion. type CompletionsUsageCompletionTokensDetails struct { + // When using Predicted Outputs, the number of tokens in the prediction that appeared in the completion. + AcceptedPredictionTokens *int32 + + // Audio input tokens generated by the model. + AudioTokens *int32 + // Tokens generated by the model for reasoning. ReasoningTokens *int32 + + // When using Predicted Outputs, the number of tokens in the prediction that did not appear in the completion. However, like + // reasoning tokens, these tokens are still counted in the total completion + // tokens for purposes of billing, output, and context window limits. + RejectedPredictionTokens *int32 } // CompletionsUsagePromptTokensDetails - Details of the prompt tokens. type CompletionsUsagePromptTokensDetails struct { - // The number of cached prompt tokens. + // Audio input tokens present in the prompt. + AudioTokens *int32 + + // Cached tokens present in the prompt. CachedTokens *int32 } @@ -1773,9 +1881,6 @@ type ContentFilterResultDetailsForPrompt struct { // ContentFilterResultsForChoice - Information about content filtering evaluated against generated model output. type ContentFilterResultsForChoice struct { - // REQUIRED; Information about detection of ungrounded material. 
- UngroundedMaterial *ContentFilterCompletionTextSpanResult - // Describes detection results against configured custom blocklists. CustomBlockLists *ContentFilterDetailedResults @@ -1805,6 +1910,9 @@ type ContentFilterResultsForChoice struct { // forced sexual violent act against one’s will, prostitution, pornography, and abuse. Sexual *ContentFilterResult + // Information about detection of ungrounded material. + UngroundedMaterial *ContentFilterCompletionTextSpanResult + // Describes language related to physical actions intended to hurt, injure, damage, or kill someone or something; describes // weapons, etc. Violence *ContentFilterResult @@ -2187,6 +2295,15 @@ type ImageGenerations struct { Data []ImageGenerationData } +// InputAudioContent - A structured chat content item containing audio data. +type InputAudioContent struct { + // REQUIRED; Base64-encoded audio data. + Data *string + + // REQUIRED; The format of the audio data. + Format *InputAudioFormat +} + // ListBatchesPage - The response data for a requested list of items. type ListBatchesPage struct { // REQUIRED; The object type, which is always list. @@ -2666,6 +2783,17 @@ type PineconeFieldMappingOptions struct { URLField *string } +// PredictionContent - Static predicted output content, such as the content of a text file that is being regenerated. +type PredictionContent struct { + // REQUIRED; The content that should be matched when generating a model response. If generated tokens would match this content, + // the entire model response can be returned much more quickly. + Content *PredictionContentContent + + // CONSTANT; The type of the predicted content you want to provide. This type is currently always content. + // Field has constant value "content", any specified value is ignored. + Type *string +} + // SpeechGenerationOptions - A representation of the request options that control the behavior of a text-to-speech operation. type SpeechGenerationOptions struct { // REQUIRED; The text to generate audio for. The maximum length is 4096 characters. @@ -2782,6 +2910,27 @@ type UploadPart struct { AzureBlockID *string } +// UserSecurityContext - User security context contains several parameters that describe the AI application itself, and the +// end user that interacts with the AI application. These fields assist your security operations teams +// to investigate and mitigate security incidents by providing a comprehensive approach to protecting your AI applications. +// (Learn more at https://aka.ms/TP4AI/Documentation/EndUserContext) about +// protecting AI applications using Microsoft Defender for Cloud. +type UserSecurityContext struct { + // The name of the application. Sensitive personal information should not be included in this field. + ApplicationName *string + + // This identifier is the Microsoft Entra ID (formerly Azure Active Directory) user object ID used to authenticate end-users + // within the generative AI application. Sensitive personal information should + // not be included in this field. + EndUserID *string + + // The Microsoft 365 tenant ID the end user belongs to. It's required when the generative AI application is multi tenant. + EndUserTenantID *string + + // Captures the original client's IP address, accepting both IPv4 and IPv6 formats. + SourceIP *string +} + // CompletionsOptions - The configuration information for a completions request. Completions support a wide variety of tasks // and generate text that continues from or "completes" provided prompt data. 
type CompletionsOptions struct { @@ -2947,6 +3096,9 @@ type ChatCompletionsOptions struct { // assistant, followed by alternating messages between the User and Assistant roles. Messages []ChatRequestMessageClassification + // Parameters for audio output. Required when audio output is requested with modalities: ["audio"] + Audio *AudioOutputParameters + // The configuration entries for Azure OpenAI chat extensions that use them. This additional specification is only compatible // with Azure OpenAI. AzureExtensionsOptions []AzureChatExtensionConfigurationClassification @@ -2990,6 +3142,14 @@ type ChatCompletionsOptions struct { // This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models. MaxTokens *int32 + // Developer-defined tags and values used for filtering completions in the stored completions dashboard. + Metadata map[string]*string + + // Output types that you would like the model to generate for this request. Most models are capable of generating text, which + // is the default: ["text"]The gpt-4o-audio-preview model can also be used to + // generate audio. To request that this model generate both text and audio responses, you can use: ["text", "audio"] + Modalities []ChatCompletionModality + // The model name to provide as part of this completions request. Not applicable to Azure OpenAI, where deployment information // should be included in the Azure resource URI that's connected to. DeploymentName *string @@ -3002,11 +3162,22 @@ type ChatCompletionsOptions struct { // Whether to enable parallel function calling during tool use. ParallelToolCalls *bool + // Configuration for a Predicted Output, which can greatly improve response times when large parts of the model response are + // known ahead of time. This is most common when you are regenerating a file with + // only minor changes to most of the content. + Prediction *PredictionContent + // A value that influences the probability of generated tokens appearing based on their existing presence in generated text. // Positive values will make tokens less likely to appear when they already exist // and increase the model's likelihood to output new topics. PresencePenalty *float32 + // This option is only valid for o1 models, + // Constrains effort on reasoning for reasoning models (see https://platform.openai.com/docs/guides/reasoning). + // Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer + // tokens used on reasoning in a response. + ReasoningEffort *ReasoningEffortValue + // An object specifying the format that the model must output. Used to enable JSON mode. ResponseFormat ChatCompletionsResponseFormatClassification @@ -3018,6 +3189,9 @@ type ChatCompletionsOptions struct { // A collection of textual sequences that will end completions generation. Stop []string + // Whether or not to store the output of this chat completion request for use in our model distillation or evaluation products. + Store *bool + // The sampling temperature to use that controls the apparent creativity of generated completions. Higher values will make // output more random while lower values will make results more focused and // deterministic. It is not recommended to modify temperature and top_p for the same completions request as the interaction @@ -3044,6 +3218,10 @@ type ChatCompletionsOptions struct { // An identifier for the caller or end user of the operation. This may be used for tracking or rate-limiting purposes. 
User *string + + // The security context identifies and authenticates users and applications in your multi-tenant AI system, helping security + // teams investigate and mitigate incidents. + UserSecurityContext *UserSecurityContext } // ChatCompletionsStreamOptions - The configuration information for a chat completions request. Completions support a wide variety @@ -3054,6 +3232,9 @@ type ChatCompletionsStreamOptions struct { // assistant, followed by alternating messages between the User and Assistant roles. Messages []ChatRequestMessageClassification + // Parameters for audio output. Required when audio output is requested with modalities: ["audio"] + Audio *AudioOutputParameters + // The configuration entries for Azure OpenAI chat extensions that use them. This additional specification is only compatible // with Azure OpenAI. AzureExtensionsOptions []AzureChatExtensionConfigurationClassification @@ -3097,6 +3278,14 @@ type ChatCompletionsStreamOptions struct { // This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models. MaxTokens *int32 + // Developer-defined tags and values used for filtering completions in the stored completions dashboard. + Metadata map[string]*string + + // Output types that you would like the model to generate for this request. Most models are capable of generating text, which + // is the default: ["text"]The gpt-4o-audio-preview model can also be used to + // generate audio. To request that this model generate both text and audio responses, you can use: ["text", "audio"] + Modalities []ChatCompletionModality + // The model name to provide as part of this completions request. Not applicable to Azure OpenAI, where deployment information // should be included in the Azure resource URI that's connected to. DeploymentName *string @@ -3109,11 +3298,22 @@ type ChatCompletionsStreamOptions struct { // Whether to enable parallel function calling during tool use. ParallelToolCalls *bool + // Configuration for a Predicted Output, which can greatly improve response times when large parts of the model response are + // known ahead of time. This is most common when you are regenerating a file with + // only minor changes to most of the content. + Prediction *PredictionContent + // A value that influences the probability of generated tokens appearing based on their existing presence in generated text. // Positive values will make tokens less likely to appear when they already exist // and increase the model's likelihood to output new topics. PresencePenalty *float32 + // This option is only valid for o1 models, + // Constrains effort on reasoning for reasoning models (see https://platform.openai.com/docs/guides/reasoning). + // Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer + // tokens used on reasoning in a response. + ReasoningEffort *ReasoningEffortValue + // An object specifying the format that the model must output. Used to enable JSON mode. ResponseFormat ChatCompletionsResponseFormatClassification @@ -3125,6 +3325,9 @@ type ChatCompletionsStreamOptions struct { // A collection of textual sequences that will end completions generation. Stop []string + // Whether or not to store the output of this chat completion request for use in our model distillation or evaluation products. + Store *bool + // Options for streaming response. Only set this when you set stream: true. 
StreamOptions *ChatCompletionStreamOptions @@ -3154,4 +3357,8 @@ type ChatCompletionsStreamOptions struct { // An identifier for the caller or end user of the operation. This may be used for tracking or rate-limiting purposes. User *string + + // The security context identifies and authenticates users and applications in your multi-tenant AI system, helping security + // teams investigate and mitigate incidents. + UserSecurityContext *UserSecurityContext } diff --git a/sdk/ai/azopenai/models_extra.go b/sdk/ai/azopenai/models_extra.go index dfd4d97badcd..cc5820fb33a1 100644 --- a/sdk/ai/azopenai/models_extra.go +++ b/sdk/ai/azopenai/models_extra.go @@ -149,6 +149,29 @@ func NewChatRequestSystemMessageContent[T []ChatMessageTextContentItem | string] } } +// ChatRequestDeveloperMessageContent contains the content for a [ChatRequestDeveloperMessage]. +// NOTE: This should be created using [azopenai.NewChatRequestDeveloperMessageContent] +type ChatRequestDeveloperMessageContent struct { + value any +} + +// MarshalJSON implements the json.Marshaller interface for type ChatRequestSystemMessageContent. +func (c ChatRequestDeveloperMessageContent) MarshalJSON() ([]byte, error) { + return json.Marshal(c.value) +} + +// NewChatRequestDeveloperMessageContent creates a [azopenai.ChatRequestDeveloperMessageContent]. +func NewChatRequestDeveloperMessageContent[T []ChatMessageTextContentItem | string](value T) *ChatRequestDeveloperMessageContent { + switch any(value).(type) { + case []ChatMessageTextContentItem: + return &ChatRequestDeveloperMessageContent{value: value} + case string: + return &ChatRequestDeveloperMessageContent{value: value} + default: + panic(fmt.Sprintf("Invalid type %T for ChatRequestDeveloperMessageContent", value)) + } +} + // ChatRequestToolMessageContent contains the content for a [ChatRequestToolMessage]. // NOTE: This should be created using [azopenai.NewChatRequestToolMessageContent] type ChatRequestToolMessageContent struct { @@ -254,6 +277,29 @@ func (c MongoDBChatExtensionParametersEmbeddingDependency) MarshalJSON() ([]byte return json.Marshal(c.value) } +// PredictionContentContent contains the content for a [PredictionContent]. +// NOTE: This should be created using [azopenai.NewPredictionContentContent] +type PredictionContentContent struct { + value any +} + +// MarshalJSON implements the json.Marshaller interface for type PredictionContentContent. +func (c PredictionContentContent) MarshalJSON() ([]byte, error) { + return json.Marshal(c.value) +} + +// NewPredictionContentContent creates a [azopenai.PredictionContentContent]. +func NewPredictionContentContent[T []ChatMessageTextContentItem | string](value T) *PredictionContentContent { + switch any(value).(type) { + case []ChatMessageTextContentItem: + return &PredictionContentContent{value: value} + case string: + return &PredictionContentContent{value: value} + default: + panic(fmt.Sprintf("Invalid type %T for PredictionContentContent", value)) + } +} + // ContentFilterResponseError is an error as a result of a request being filtered. type ContentFilterResponseError struct { azcore.ResponseError diff --git a/sdk/ai/azopenai/models_serde.go b/sdk/ai/azopenai/models_serde.go index 388e23fae654..a907ed5d53d3 100644 --- a/sdk/ai/azopenai/models_serde.go +++ b/sdk/ai/azopenai/models_serde.go @@ -48,6 +48,76 @@ func (a *AddUploadPartRequest) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type AudioOutputParameters. 
+func (a AudioOutputParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "format", a.Format) + populate(objectMap, "voice", a.Voice) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AudioOutputParameters. +func (a *AudioOutputParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "format": + err = unpopulate(val, "Format", &a.Format) + delete(rawMsg, key) + case "voice": + err = unpopulate(val, "Voice", &a.Voice) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AudioResponseData. +func (a AudioResponseData) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "data", a.Data) + populateTimeUnix(objectMap, "expires_at", a.ExpiresAt) + populate(objectMap, "id", a.ID) + populate(objectMap, "transcript", a.Transcript) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AudioResponseData. +func (a *AudioResponseData) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "data": + err = unpopulate(val, "Data", &a.Data) + delete(rawMsg, key) + case "expires_at": + err = unpopulateTimeUnix(val, "ExpiresAt", &a.ExpiresAt) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &a.ID) + delete(rawMsg, key) + case "transcript": + err = unpopulate(val, "Transcript", &a.Transcript) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type AudioTranscription. func (a AudioTranscription) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -2160,6 +2230,7 @@ func (c *ChatCompletionsNamedToolSelection) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type ChatCompletionsOptions. 
func (c chatCompletionsOptions) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "audio", c.Audio) populate(objectMap, "data_sources", c.AzureExtensionsOptions) populate(objectMap, "enhancements", c.Enhancements) populate(objectMap, "frequency_penalty", c.FrequencyPenalty) @@ -2170,13 +2241,18 @@ func (c chatCompletionsOptions) MarshalJSON() ([]byte, error) { populate(objectMap, "max_completion_tokens", c.MaxCompletionTokens) populate(objectMap, "max_tokens", c.MaxTokens) populate(objectMap, "messages", c.Messages) + populate(objectMap, "metadata", c.Metadata) + populate(objectMap, "modalities", c.Modalities) populate(objectMap, "model", c.DeploymentName) populate(objectMap, "n", c.N) populate(objectMap, "parallel_tool_calls", c.ParallelToolCalls) + populate(objectMap, "prediction", c.Prediction) populate(objectMap, "presence_penalty", c.PresencePenalty) + populate(objectMap, "reasoning_effort", c.ReasoningEffort) populate(objectMap, "response_format", c.ResponseFormat) populate(objectMap, "seed", c.Seed) populate(objectMap, "stop", c.Stop) + populate(objectMap, "store", c.Store) populate(objectMap, "stream", c.Stream) populate(objectMap, "stream_options", c.StreamOptions) populate(objectMap, "temperature", c.Temperature) @@ -2185,6 +2261,7 @@ func (c chatCompletionsOptions) MarshalJSON() ([]byte, error) { populate(objectMap, "top_logprobs", c.TopLogProbs) populate(objectMap, "top_p", c.TopP) populate(objectMap, "user", c.User) + populate(objectMap, "user_security_context", c.UserSecurityContext) return json.Marshal(objectMap) } @@ -2197,6 +2274,9 @@ func (c *chatCompletionsOptions) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "audio": + err = unpopulate(val, "Audio", &c.Audio) + delete(rawMsg, key) case "data_sources": c.AzureExtensionsOptions, err = unmarshalAzureChatExtensionConfigurationClassificationArray(val) delete(rawMsg, key) @@ -2227,6 +2307,12 @@ func (c *chatCompletionsOptions) UnmarshalJSON(data []byte) error { case "messages": c.Messages, err = unmarshalChatRequestMessageClassificationArray(val) delete(rawMsg, key) + case "metadata": + err = unpopulate(val, "Metadata", &c.Metadata) + delete(rawMsg, key) + case "modalities": + err = unpopulate(val, "Modalities", &c.Modalities) + delete(rawMsg, key) case "model": err = unpopulate(val, "Model", &c.DeploymentName) delete(rawMsg, key) @@ -2236,9 +2322,15 @@ func (c *chatCompletionsOptions) UnmarshalJSON(data []byte) error { case "parallel_tool_calls": err = unpopulate(val, "ParallelToolCalls", &c.ParallelToolCalls) delete(rawMsg, key) + case "prediction": + err = unpopulate(val, "Prediction", &c.Prediction) + delete(rawMsg, key) case "presence_penalty": err = unpopulate(val, "PresencePenalty", &c.PresencePenalty) delete(rawMsg, key) + case "reasoning_effort": + err = unpopulate(val, "ReasoningEffort", &c.ReasoningEffort) + delete(rawMsg, key) case "response_format": c.ResponseFormat, err = unmarshalChatCompletionsResponseFormatClassification(val) delete(rawMsg, key) @@ -2248,6 +2340,9 @@ func (c *chatCompletionsOptions) UnmarshalJSON(data []byte) error { case "stop": err = unpopulate(val, "Stop", &c.Stop) delete(rawMsg, key) + case "store": + err = unpopulate(val, "Store", &c.Store) + delete(rawMsg, key) case "stream": err = unpopulate(val, "Stream", &c.Stream) delete(rawMsg, key) @@ -2272,6 +2367,9 @@ func (c *chatCompletionsOptions) UnmarshalJSON(data []byte) error { case "user": err = unpopulate(val, "User", &c.User) delete(rawMsg, 
key) + case "user_security_context": + err = unpopulate(val, "UserSecurityContext", &c.UserSecurityContext) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -2419,6 +2517,37 @@ func (c *ChatFinishDetails) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ChatMessageAudioContentItem. +func (c ChatMessageAudioContentItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "input_audio", c.InputAudio) + objectMap["type"] = "input_audio" + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatMessageAudioContentItem. +func (c *ChatMessageAudioContentItem) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "input_audio": + err = unpopulate(val, "InputAudio", &c.InputAudio) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &c.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ChatMessageContentItem. func (c ChatMessageContentItem) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -2617,6 +2746,41 @@ func (c *ChatRequestAssistantMessage) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ChatRequestDeveloperMessage. +func (c ChatRequestDeveloperMessage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "content", c.Content) + objectMap["role"] = ChatRoleDeveloper + populate(objectMap, "name", c.Name) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatRequestDeveloperMessage. +func (c *ChatRequestDeveloperMessage) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "content": + err = unpopulate(val, "Content", &c.Content) + delete(rawMsg, key) + case "role": + err = unpopulate(val, "role", &c.role) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &c.Name) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ChatRequestFunctionMessage. func (c ChatRequestFunctionMessage) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -2787,6 +2951,7 @@ func (c *ChatRequestUserMessage) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type ChatResponseMessage. 
func (c ChatResponseMessage) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "audio", c.Audio) populate(objectMap, "content", c.Content) populate(objectMap, "context", c.Context) populate(objectMap, "function_call", c.FunctionCall) @@ -2805,6 +2970,9 @@ func (c *ChatResponseMessage) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "audio": + err = unpopulate(val, "Audio", &c.Audio) + delete(rawMsg, key) case "content": err = unpopulate(val, "Content", &c.Content) delete(rawMsg, key) @@ -3247,7 +3415,10 @@ func (c *CompletionsUsage) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type CompletionsUsageCompletionTokensDetails. func (c CompletionsUsageCompletionTokensDetails) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "accepted_prediction_tokens", c.AcceptedPredictionTokens) + populate(objectMap, "audio_tokens", c.AudioTokens) populate(objectMap, "reasoning_tokens", c.ReasoningTokens) + populate(objectMap, "rejected_prediction_tokens", c.RejectedPredictionTokens) return json.Marshal(objectMap) } @@ -3260,9 +3431,18 @@ func (c *CompletionsUsageCompletionTokensDetails) UnmarshalJSON(data []byte) err for key, val := range rawMsg { var err error switch key { + case "accepted_prediction_tokens": + err = unpopulate(val, "AcceptedPredictionTokens", &c.AcceptedPredictionTokens) + delete(rawMsg, key) + case "audio_tokens": + err = unpopulate(val, "AudioTokens", &c.AudioTokens) + delete(rawMsg, key) case "reasoning_tokens": err = unpopulate(val, "ReasoningTokens", &c.ReasoningTokens) delete(rawMsg, key) + case "rejected_prediction_tokens": + err = unpopulate(val, "RejectedPredictionTokens", &c.RejectedPredictionTokens) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -3274,6 +3454,7 @@ func (c *CompletionsUsageCompletionTokensDetails) UnmarshalJSON(data []byte) err // MarshalJSON implements the json.Marshaller interface for type CompletionsUsagePromptTokensDetails. func (c CompletionsUsagePromptTokensDetails) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "audio_tokens", c.AudioTokens) populate(objectMap, "cached_tokens", c.CachedTokens) return json.Marshal(objectMap) } @@ -3287,6 +3468,9 @@ func (c *CompletionsUsagePromptTokensDetails) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "audio_tokens": + err = unpopulate(val, "AudioTokens", &c.AudioTokens) + delete(rawMsg, key) case "cached_tokens": err = unpopulate(val, "CachedTokens", &c.CachedTokens) delete(rawMsg, key) @@ -4534,6 +4718,37 @@ func (i *ImageGenerations) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type InputAudioContent. +func (i InputAudioContent) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "data", i.Data) + populate(objectMap, "format", i.Format) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type InputAudioContent. 
+func (i *InputAudioContent) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "data": + err = unpopulate(val, "Data", &i.Data) + delete(rawMsg, key) + case "format": + err = unpopulate(val, "Format", &i.Format) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ListBatchesPage. func (l ListBatchesPage) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -5425,6 +5640,37 @@ func (p *PineconeFieldMappingOptions) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type PredictionContent. +func (p PredictionContent) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "content", p.Content) + objectMap["type"] = "content" + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PredictionContent. +func (p *PredictionContent) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "content": + err = unpopulate(val, "Content", &p.Content) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &p.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type SpeechGenerationOptions. func (s SpeechGenerationOptions) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -5687,6 +5933,45 @@ func (u *UploadPart) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type UserSecurityContext. +func (u UserSecurityContext) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "application_name", u.ApplicationName) + populate(objectMap, "end_user_id", u.EndUserID) + populate(objectMap, "end_user_tenant_id", u.EndUserTenantID) + populate(objectMap, "source_ip", u.SourceIP) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UserSecurityContext. 
+func (u *UserSecurityContext) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "application_name": + err = unpopulate(val, "ApplicationName", &u.ApplicationName) + delete(rawMsg, key) + case "end_user_id": + err = unpopulate(val, "EndUserID", &u.EndUserID) + delete(rawMsg, key) + case "end_user_tenant_id": + err = unpopulate(val, "EndUserTenantID", &u.EndUserTenantID) + delete(rawMsg, key) + case "source_ip": + err = unpopulate(val, "SourceIP", &u.SourceIP) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + func populate(m map[string]any, k string, v any) { if v == nil { return diff --git a/sdk/ai/azopenai/polymorphic_helpers.go b/sdk/ai/azopenai/polymorphic_helpers.go index c2af140043ab..3d3bbfae1829 100644 --- a/sdk/ai/azopenai/polymorphic_helpers.go +++ b/sdk/ai/azopenai/polymorphic_helpers.go @@ -180,6 +180,8 @@ func unmarshalChatRequestMessageClassification(rawMsg json.RawMessage) (ChatRequ switch m["role"] { case string(ChatRoleAssistant): b = &ChatRequestAssistantMessage{} + case string(ChatRoleDeveloper): + b = &ChatRequestDeveloperMessage{} case string(ChatRoleFunction): b = &ChatRequestFunctionMessage{} case string(ChatRoleSystem): diff --git a/sdk/ai/azopenai/testdata/tsp-location.yaml b/sdk/ai/azopenai/testdata/tsp-location.yaml index ae5adde714e0..643b53baa5c7 100644 --- a/sdk/ai/azopenai/testdata/tsp-location.yaml +++ b/sdk/ai/azopenai/testdata/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/cognitiveservices/OpenAI.Inference # official release commit for 2024-10-01-preview -commit: d1ae99fabc48ee2ee818a4ea96be5cdc332adc2a +commit: 68e4b1f37f9ed18df0081f0c7c3865ecd89838e1 repo: Azure/azure-rest-api-specs
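
A minimal usage sketch of the new chat-completions surface serialized above (`ReasoningEffort`, `Store`, `UserSecurityContext` on `ChatCompletionsOptions`). This is illustrative only, not part of the patch: the endpoint, API key, and deployment name are placeholders, the new option fields are assumed to be pointer-typed like the rest of the package, and the constant `ReasoningEffortValueLow` is assumed to follow the package's usual `<Type><Value>` naming for the new `ReasoningEffortValue` enum.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

func main() {
	// Placeholder endpoint and key; use your Azure OpenAI resource values.
	client, err := azopenai.NewClientWithKeyCredential(
		"https://<your-resource>.openai.azure.com",
		azcore.NewKeyCredential("<api-key>"),
		nil,
	)
	if err != nil {
		log.Fatal(err)
	}

	resp, err := client.GetChatCompletions(context.TODO(), azopenai.ChatCompletionsOptions{
		DeploymentName: to.Ptr("<deployment-name>"),
		Messages: []azopenai.ChatRequestMessageClassification{
			&azopenai.ChatRequestUserMessage{
				Content: azopenai.NewChatRequestUserMessageContent("Summarize this SDK release in one sentence."),
			},
		},
		// Fields added for 2025-01-01-preview; the enum constant name is an
		// assumption based on the package's naming convention.
		ReasoningEffort: to.Ptr(azopenai.ReasoningEffortValueLow),
		Store:           to.Ptr(true),
		UserSecurityContext: &azopenai.UserSecurityContext{
			ApplicationName: to.Ptr("release-notes-bot"),
			EndUserID:       to.Ptr("end-user-1234"),
		},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(*resp.Choices[0].Message.Content)
}
```

The `UserSecurityContext` fields used here mirror the serialization added in models_serde.go above (`application_name`, `end_user_id`, `end_user_tenant_id`, `source_ip`).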