diff --git a/Sources/GoogleAI/GenerationConfig.swift b/Sources/GoogleAI/GenerationConfig.swift
index c417e59..4f4c552 100644
--- a/Sources/GoogleAI/GenerationConfig.swift
+++ b/Sources/GoogleAI/GenerationConfig.swift
@@ -57,7 +57,7 @@ public struct GenerationConfig: Encodable {
   /// (unbounded).
   public let maxOutputTokens: Int?
 
-  /// A set of up to 5 ``String``s that will stop output generation. If
+  /// A set of up to 5 `String`s that will stop output generation. If
   /// specified, the API will stop at the first appearance of a stop sequence.
   /// The stop sequence will not be included as part of the response.
   public let stopSequences: [String]?
diff --git a/Sources/GoogleAI/Safety.swift b/Sources/GoogleAI/Safety.swift
index 1ec3f13..7385b5b 100644
--- a/Sources/GoogleAI/Safety.swift
+++ b/Sources/GoogleAI/Safety.swift
@@ -19,7 +19,7 @@ import Foundation
 /// responses that exceed a certain threshold.
 public struct SafetyRating: Decodable, Equatable {
   /// The category describing the potential harm a piece of content may pose. See
-  /// ``SafetySetting.HarmCategory`` for a list of possible values.
+  /// ``SafetySetting/HarmCategory`` for a list of possible values.
   public let category: SafetySetting.HarmCategory
 
   /// The model-generated probability that a given piece of content falls under the harm category