From b6b5ca1624bc6d812b3795dbccc541e3c9e2019f Mon Sep 17 00:00:00 2001
From: Andrew Heard
Date: Tue, 12 Dec 2023 12:44:41 -0500
Subject: [PATCH] Fix DocC warnings when running `swift package generate-documentation`

---
 Sources/GoogleAI/GenerationConfig.swift | 2 +-
 Sources/GoogleAI/Safety.swift           | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Sources/GoogleAI/GenerationConfig.swift b/Sources/GoogleAI/GenerationConfig.swift
index c417e59..4f4c552 100644
--- a/Sources/GoogleAI/GenerationConfig.swift
+++ b/Sources/GoogleAI/GenerationConfig.swift
@@ -57,7 +57,7 @@ public struct GenerationConfig: Encodable {
   /// (unbounded).
   public let maxOutputTokens: Int?
 
-  /// A set of up to 5 ``String``s that will stop output generation. If
+  /// A set of up to 5 `String`s that will stop output generation. If
   /// specified, the API will stop at the first appearance of a stop sequence.
   /// The stop sequence will not be included as part of the response.
   public let stopSequences: [String]?
diff --git a/Sources/GoogleAI/Safety.swift b/Sources/GoogleAI/Safety.swift
index 1ec3f13..7385b5b 100644
--- a/Sources/GoogleAI/Safety.swift
+++ b/Sources/GoogleAI/Safety.swift
@@ -19,7 +19,7 @@ import Foundation
 /// responses that exceed a certain threshold.
 public struct SafetyRating: Decodable, Equatable {
   /// The category describing the potential harm a piece of content may pose. See
-  /// ``SafetySetting.HarmCategory`` for a list of possible values.
+  /// ``SafetySetting/HarmCategory`` for a list of possible values.
   public let category: SafetySetting.HarmCategory
 
   /// The model-generated probability that a given piece of content falls under the harm category
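
Both one-character changes address DocC link resolution. A DocC symbol link uses double backticks with a `/`-separated path to refer to a nested symbol (``SafetySetting/HarmCategory``); a dot-separated path is not resolved and produces a warning. A type from outside the module, such as `String`, is written in plain code voice with single backticks so DocC does not attempt (and fail) to resolve it as a symbol link. The sketch below illustrates both styles in an ordinary Swift doc comment; the `Widget` type and its members are hypothetical stand-ins, not part of the real API.

```swift
/// A hypothetical type used only to illustrate the two DocC comment styles
/// touched by the patch; `Widget` and `Widget/Kind` do not exist in the real API.
public struct Widget {
  /// The kind of widget. See ``Widget/Kind`` for a list of possible values.
  /// (A DocC symbol link: double backticks plus a `/`-separated path.)
  public let kind: Kind

  /// A set of up to 5 `String` labels.
  /// (Plain code voice: single backticks, so DocC does not try to resolve
  /// the out-of-module type `String` as a symbol link.)
  public let labels: [String]?

  /// The nested type referenced by the symbol link above.
  public enum Kind {
    case basic
    case custom
  }
}
```

With a change like this in place, running `swift package generate-documentation` (via the Swift-DocC plugin) should no longer report unresolved-symbol warnings for these two comments.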