Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add docs for GenerateContentResponse #42

Merged
merged 2 commits into from
Dec 12, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 10 additions & 1 deletion Sources/GoogleAI/Chat.swift
Original file line number Diff line number Diff line change
Expand Up @@ -29,12 +29,16 @@ public class Chat {
/// model. This will be provided to the model for each message sent as context for the discussion.
public var history: [ModelContent]

/// Sends the given message parts, using the existing history of this chat as context.
///
/// Convenience overload; see ``sendMessage(_:)-3ify5`` for full details.
public func sendMessage(_ parts: PartsRepresentable...) async throws -> GenerateContentResponse {
  // Wrap the variadic parts into a single `ModelContent` and forward to the array overload,
  // which performs the actual send and history bookkeeping.
  let message = ModelContent(parts: parts)
  return try await sendMessage([message])
}

/// Send a message, using the existing history of this chat as context. If successful, the message
/// Sends a message using the existing history of this chat as context. If successful, the message
/// and response will be added to the history. If unsuccessful, history will remain unchanged.
/// - Parameter content: The new content to send as a single chat message.
/// - Returns: The model's response if no error occurred.
/// - Throws: A ``GenerateContentError`` if an error occurred.
public func sendMessage(_ content: [ModelContent]) async throws
-> GenerateContentResponse {
// Ensure that the new content has the role set.
Expand All @@ -61,11 +65,16 @@ public class Chat {
return result
}

/// Sends the given message parts as a streaming request, using the existing history of this
/// chat as context.
///
/// Convenience overload; see ``sendMessageStream(_:)-4abs3`` for full details.
public func sendMessageStream(_ parts: PartsRepresentable...)
  -> AsyncThrowingStream<GenerateContentResponse, Error> {
  // Wrap the variadic parts into a single `ModelContent` and forward to the array overload,
  // which produces the stream and performs history bookkeeping.
  let message = ModelContent(parts: parts)
  return sendMessageStream([message])
}

/// Sends a message using the existing history of this chat as context. If successful, the message
/// and response will be added to the history. If unsuccessful, history will remain unchanged.
/// - Parameter content: The new content to send as a single chat message.
/// - Returns: A stream containing the model's response or an error if an error occurred.
public func sendMessageStream(_ content: [ModelContent])
-> AsyncThrowingStream<GenerateContentResponse, Error> {
return AsyncThrowingStream { continuation in
Expand Down
45 changes: 43 additions & 2 deletions Sources/GoogleAI/GenerateContentResponse.swift
Original file line number Diff line number Diff line change
Expand Up @@ -14,14 +14,19 @@

import Foundation

/// The model's response to a generate content request.
public struct GenerateContentResponse {
/// A list of candidate response content, ordered from best to worst.
public let candidates: [CandidateResponse]

/// A value containing the safety ratings for the response, or, if the request was blocked, a
/// reason for blocking the request.
public let promptFeedback: PromptFeedback?

/// The response's content as text, if it exists.
public var text: String? {
guard let candidate = candidates.first else {
Logging.default.error("Could not get text a response that had no candidates.")
Logging.default.error("Could not get text from a response that had no candidates.")
return nil
}
guard let text = candidate.content.parts.first?.text else {
Expand Down Expand Up @@ -69,12 +74,20 @@ extension GenerateContentResponse: Decodable {
}
}

/// A struct representing a possible reply to a content generation prompt. Each content generation
/// prompt may produce multiple candidate responses.
public struct CandidateResponse {
/// The response's content.
public let content: ModelContent

/// The safety rating of the response content.
public let safetyRatings: [SafetyRating]

/// The reason the model stopped generating content, if it exists; for example, if the model
/// generated a predefined stop sequence.
public let finishReason: FinishReason?

/// Cited works in the model's response content, if it exists.
public let citationMetadata: CitationMetadata?

/// Initializer for SwiftUI previews or tests.
Expand All @@ -96,6 +109,8 @@ extension CandidateResponse: Decodable {
case citationMetadata
}

/// Initializes a response from a decoder. Used for decoding server responses; not for public
/// use.
public init(from decoder: Decoder) throws {
let container = try decoder.container(keyedBy: CodingKeys.self)

Expand Down Expand Up @@ -135,16 +150,26 @@ extension CandidateResponse: Decodable {

/// A collection of source attributions for a piece of content.
///
/// Returned as part of a ``CandidateResponse`` when the model's output derives from cited works.
public struct CitationMetadata: Decodable {
/// A list of individual cited sources and the parts of the content to which they apply.
public let citationSources: [Citation]
}

/// A struct describing a single source attribution for a span of a model response.
public struct Citation: Decodable {
/// The inclusive beginning of a sequence in a model response that derives from a cited source.
public let startIndex: Int

/// The exclusive end of a sequence in a model response that derives from a cited source.
public let endIndex: Int

/// A link (URI) to the cited source.
public let uri: String

/// The license the cited source work is distributed under.
// NOTE(review): declared non-optional here, so decoding presumably fails if the server omits
// the license field — confirm against the backend response schema.
public let license: String
}

/// A value enumerating possible reasons for a model to terminate a content generation request.
public enum FinishReason: String {
case unknown = "FINISH_REASON_UNKNOWN"

Expand All @@ -156,11 +181,15 @@ public enum FinishReason: String {
/// The maximum number of tokens as specified in the request was reached.
case maxTokens = "MAX_TOKENS"

/// The token generation was stopped as the response was flagged for safety reasons.
/// The token generation was stopped because the response was flagged for safety reasons.
/// NOTE: When streaming, the Candidate.content will be empty if content filters blocked the
/// output.
case safety = "SAFETY"

/// The token generation was stopped because the response was flagged for unauthorized citations.
case recitation = "RECITATION"

/// All other reasons that stopped token generation.
case other = "OTHER"
}

Expand All @@ -179,11 +208,20 @@ extension FinishReason: Decodable {
}
}

/// A metadata struct containing any feedback the model had on the prompt it was provided.
public struct PromptFeedback {
/// A type describing possible reasons to block a prompt.
public enum BlockReason: String, Decodable {
/// The block reason is unknown.
case unknown = "UNKNOWN"

/// The block reason was not specified in the server response.
case unspecified = "BLOCK_REASON_UNSPECIFIED"

/// The prompt was blocked because it was deemed unsafe.
case safety = "SAFETY"

/// All other block reasons.
case other = "OTHER"

/// Do not explicitly use. Initializer required for Decodable conformance.
Expand All @@ -200,7 +238,10 @@ public struct PromptFeedback {
}
}

/// The reason a prompt was blocked, if it was blocked.
public let blockReason: BlockReason?

/// The safety ratings of the prompt.
public let safetyRatings: [SafetyRating]

/// Initializer for SwiftUI previews or tests.
Expand Down
2 changes: 1 addition & 1 deletion Sources/GoogleAI/GenerationConfig.swift
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ public struct GenerationConfig: Encodable {
/// must be between [1, 8], inclusive. If unset, this will default to 1.
///
/// - Note: Only unique candidates are returned. Higher temperatures are more
/// likely to produce unique candidates. Setting temperature to 0 will
/// likely to produce unique candidates. Setting `temperature` to 0 will
/// always produce exactly one candidate regardless of the
/// `candidateCount`.
public let candidateCount: Int?
Expand Down
Loading