From 6f8b43a8ff9cee2f7b3d6c6a2d41534a539fd169 Mon Sep 17 00:00:00 2001 From: Lee <120439693+lygitdata@users.noreply.github.com> Date: Thu, 15 Feb 2024 13:16:56 -0500 Subject: [PATCH] Add files via upload --- R/src/DESCRIPTION | 16 +- R/src/NAMESPACE | 25 +- R/src/R/available.models.R | 33 +++ R/src/R/chat.R | 211 ++++++++++++++++ R/src/R/chat.edit.R | 218 ++++++++++++++++ R/src/R/chat.history.convert.R | 45 ++++ R/src/R/chat.history.export.R | 39 +++ R/src/R/chat.history.import.R | 41 +++ R/src/R/chat.history.print.R | 45 ++++ R/src/R/chat.history.reset.R | 37 +++ R/src/R/chat.history.save.R | 42 ++++ R/src/R/genai.google.R | 63 +++++ R/src/R/genai.google.chat.R | 104 ++++++++ R/src/R/genai.google.chat.edit.R | 126 ++++++++++ R/src/R/genai.google.chat.history.convert.R | 30 +++ R/src/R/genai.google.chat.history.export.R | 4 + R/src/R/genai.google.chat.history.import.R | 26 ++ R/src/R/genai.google.chat.history.print.R | 41 +++ R/src/R/genai.google.chat.history.reset.R | 4 + R/src/R/genai.google.chat.history.save.R | 6 + R/src/R/genai.google.class.R | 85 +++++++ R/src/R/genai.google.txt.R | 85 +++++++ R/src/R/genai.google.txt.image.R | 104 ++++++++ R/src/R/genai.google.utils.R | 202 +++++++++++++++ R/src/R/genai.moonshot.R | 62 +++++ R/src/R/genai.moonshot.chat.R | 84 +++++++ R/src/R/genai.moonshot.chat.edit.R | 106 ++++++++ R/src/R/genai.moonshot.chat.history.convert.R | 23 ++ R/src/R/genai.moonshot.chat.history.export.R | 4 + R/src/R/genai.moonshot.chat.history.import.R | 22 ++ R/src/R/genai.moonshot.chat.history.print.R | 40 +++ R/src/R/genai.moonshot.chat.history.reset.R | 9 + R/src/R/genai.moonshot.chat.history.save.R | 6 + R/src/R/genai.moonshot.class.R | 81 ++++++ R/src/R/genai.moonshot.txt.R | 72 ++++++ R/src/R/genai.moonshot.utils.R | 115 +++++++++ R/src/R/genai.openai.R | 71 ++++++ R/src/R/genai.openai.chat.R | 107 ++++++++ R/src/R/genai.openai.chat.edit.R | 129 ++++++++++ R/src/R/genai.openai.chat.history.convert.R | 23 ++ R/src/R/genai.openai.chat.history.export.R | 4 + R/src/R/genai.openai.chat.history.import.R | 22 ++ R/src/R/genai.openai.chat.history.print.R | 40 +++ R/src/R/genai.openai.chat.history.reset.R | 9 + R/src/R/genai.openai.chat.history.save.R | 6 + R/src/R/genai.openai.class.R | 105 ++++++++ R/src/R/genai.openai.img.R | 87 +++++++ R/src/R/genai.openai.txt.R | 95 +++++++ R/src/R/genai.openai.txt.image.R | 113 +++++++++ R/src/R/genai.openai.utils.R | 233 ++++++++++++++++++ R/src/R/genai.utils.R | 11 + R/src/R/img.R | 83 +++++++ R/src/R/imports.R | 8 + R/src/R/txt.R | 205 +++++++++++++++ R/src/R/txt.image.R | 193 +++++++++++++++ R/src/man/available.models.Rd | 37 +++ R/src/man/chat.Rd | 216 ++++++++++++++++ R/src/man/chat.edit.Rd | 228 +++++++++++++++++ R/src/man/chat.history.convert.Rd | 51 ++++ R/src/man/chat.history.export.Rd | 46 ++++ R/src/man/chat.history.import.Rd | 47 ++++ R/src/man/chat.history.print.Rd | 51 ++++ R/src/man/chat.history.reset.Rd | 43 ++++ R/src/man/chat.history.save.Rd | 50 ++++ R/src/man/genai.google.Rd | 67 +++++ R/src/man/genai.moonshot.Rd | 66 +++++ R/src/man/genai.openai.Rd | 74 ++++++ R/src/man/img.Rd | 88 +++++++ R/src/man/txt.Rd | 210 ++++++++++++++++ R/src/man/txt.image.Rd | 197 +++++++++++++++ 70 files changed, 5253 insertions(+), 18 deletions(-) create mode 100644 R/src/R/available.models.R create mode 100644 R/src/R/chat.R create mode 100644 R/src/R/chat.edit.R create mode 100644 R/src/R/chat.history.convert.R create mode 100644 R/src/R/chat.history.export.R create mode 100644 R/src/R/chat.history.import.R create mode 100644 
R/src/R/chat.history.print.R create mode 100644 R/src/R/chat.history.reset.R create mode 100644 R/src/R/chat.history.save.R create mode 100644 R/src/R/genai.google.R create mode 100644 R/src/R/genai.google.chat.R create mode 100644 R/src/R/genai.google.chat.edit.R create mode 100644 R/src/R/genai.google.chat.history.convert.R create mode 100644 R/src/R/genai.google.chat.history.export.R create mode 100644 R/src/R/genai.google.chat.history.import.R create mode 100644 R/src/R/genai.google.chat.history.print.R create mode 100644 R/src/R/genai.google.chat.history.reset.R create mode 100644 R/src/R/genai.google.chat.history.save.R create mode 100644 R/src/R/genai.google.class.R create mode 100644 R/src/R/genai.google.txt.R create mode 100644 R/src/R/genai.google.txt.image.R create mode 100644 R/src/R/genai.google.utils.R create mode 100644 R/src/R/genai.moonshot.R create mode 100644 R/src/R/genai.moonshot.chat.R create mode 100644 R/src/R/genai.moonshot.chat.edit.R create mode 100644 R/src/R/genai.moonshot.chat.history.convert.R create mode 100644 R/src/R/genai.moonshot.chat.history.export.R create mode 100644 R/src/R/genai.moonshot.chat.history.import.R create mode 100644 R/src/R/genai.moonshot.chat.history.print.R create mode 100644 R/src/R/genai.moonshot.chat.history.reset.R create mode 100644 R/src/R/genai.moonshot.chat.history.save.R create mode 100644 R/src/R/genai.moonshot.class.R create mode 100644 R/src/R/genai.moonshot.txt.R create mode 100644 R/src/R/genai.moonshot.utils.R create mode 100644 R/src/R/genai.openai.R create mode 100644 R/src/R/genai.openai.chat.R create mode 100644 R/src/R/genai.openai.chat.edit.R create mode 100644 R/src/R/genai.openai.chat.history.convert.R create mode 100644 R/src/R/genai.openai.chat.history.export.R create mode 100644 R/src/R/genai.openai.chat.history.import.R create mode 100644 R/src/R/genai.openai.chat.history.print.R create mode 100644 R/src/R/genai.openai.chat.history.reset.R create mode 100644 R/src/R/genai.openai.chat.history.save.R create mode 100644 R/src/R/genai.openai.class.R create mode 100644 R/src/R/genai.openai.img.R create mode 100644 R/src/R/genai.openai.txt.R create mode 100644 R/src/R/genai.openai.txt.image.R create mode 100644 R/src/R/genai.openai.utils.R create mode 100644 R/src/R/genai.utils.R create mode 100644 R/src/R/img.R create mode 100644 R/src/R/imports.R create mode 100644 R/src/R/txt.R create mode 100644 R/src/R/txt.image.R create mode 100644 R/src/man/available.models.Rd create mode 100644 R/src/man/chat.Rd create mode 100644 R/src/man/chat.edit.Rd create mode 100644 R/src/man/chat.history.convert.Rd create mode 100644 R/src/man/chat.history.export.Rd create mode 100644 R/src/man/chat.history.import.Rd create mode 100644 R/src/man/chat.history.print.Rd create mode 100644 R/src/man/chat.history.reset.Rd create mode 100644 R/src/man/chat.history.save.Rd create mode 100644 R/src/man/genai.google.Rd create mode 100644 R/src/man/genai.moonshot.Rd create mode 100644 R/src/man/genai.openai.Rd create mode 100644 R/src/man/img.Rd create mode 100644 R/src/man/txt.Rd create mode 100644 R/src/man/txt.image.Rd diff --git a/R/src/DESCRIPTION b/R/src/DESCRIPTION index 46a9b81..10f49d0 100644 --- a/R/src/DESCRIPTION +++ b/R/src/DESCRIPTION @@ -1,7 +1,7 @@ Package: GenAI Type: Package Title: Generative Artificial Intelligence -Version: 0.1.15 +Version: 0.2.0 Authors@R: c( person( given = "Li", @@ -12,15 +12,15 @@ Authors@R: c( ) ) Maintainer: Li Yuan -Description: Utilizing 'Generative Artificial Intelligence' models like 'GPT-4' and 
'Gemini Pro' as coding and writing assistants for 'R' users. Through these models, 'GenAI' offers a variety of functions, encompassing text generation, code optimization, natural language processing, chat, and image interpretation. The goal is to aid 'R' users in streamlining laborious coding and language processing tasks. +Description: Utilizing Generative Artificial Intelligence models like 'GPT-4' and 'Gemini Pro' as coding and writing assistants for 'R' users. Through these models, 'GenAI' offers a variety of functions, encompassing text generation, code optimization, natural language processing, chat, and image interpretation. The goal is to aid 'R' users in streamlining laborious coding and language processing tasks. License: CC BY 4.0 URL: https://genai.gd.edu.kg/ BugReports: https://github.com/GitData-GA/GenAI/issues Encoding: UTF-8 -LazyData: true RoxygenNote: 7.3.0 -Imports: - base64enc, - httr, - jsonlite, - tools +Depends: magrittr +Imports: base64enc, httr, jsonlite, tools, R6, listenv, magick, + ggplotify +NeedsCompilation: no +Packaged: 2024-02-15 03:28:07 UTC; lp130 +Author: Li Yuan [aut, cre] () diff --git a/R/src/NAMESPACE b/R/src/NAMESPACE index b32f188..3f237ea 100644 --- a/R/src/NAMESPACE +++ b/R/src/NAMESPACE @@ -2,21 +2,26 @@ export(available.models) export(chat) -export(chat.convert) export(chat.edit) -export(chat.recent) -export(chat.save) -export(chat.setup) -export(connect.genai) +export(chat.history.convert) +export(chat.history.export) +export(chat.history.import) +export(chat.history.print) +export(chat.history.reset) +export(chat.history.save) +export(genai.google) +export(genai.moonshot) +export(genai.openai) +export(img) export(txt) -export(txt.explain.code) -export(txt.fix.grammar) export(txt.image) -export(txt.optimize.code) -importFrom(base64enc,base64encode) +import(R6) +importFrom(ggplotify,as.ggplot) importFrom(httr,GET) importFrom(httr,POST) importFrom(httr,add_headers) importFrom(httr,content) importFrom(jsonlite,toJSON) -importFrom(tools,file_ext) +importFrom(listenv,listenv) +importFrom(magick,image_read) +importFrom(magrittr,"%>%") diff --git a/R/src/R/available.models.R b/R/src/R/available.models.R new file mode 100644 index 0000000..e21b249 --- /dev/null +++ b/R/src/R/available.models.R @@ -0,0 +1,33 @@ +#' Get Supported Generative AI Models +#' +#' This function sends a request to the GenAI database API to retrieve information +#' about available generative AI models. +#' +#' @return If successful, the function returns a list containing generative AI +#' service providers and their corresponding models. If the function encounters an error, +#' it will halt execution and provide an error message. +#' +#' @details +#' The function utilizes the GenAI database API to fetch the latest information about +#' available Generative AI models. The retrieved data includes details about different models +#' offered by various service providers. +#' +#' @seealso +#' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} +#' +#' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/available_models.ipynb}{Live Demo in Colab} +#' +#' @examples +#' \dontrun{ +#' # This function does not require a GenAI object. Please refer to the +#' # "Live Demo in Colab" above for real examples. The following examples +#' # are just some basic guidelines.
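+#' +#' # A hypothetical glimpse of the returned structure; the exact fields +#' # depend on the live model database: +#' # models = available.models() +#' # names(models) # e.g. "google", "moonshot", "openai" +#' # models$google$model # supported model names for one provider +#' # models$google$version # the matching API versions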
+#' +#' all.models = available.models() %>% print() +#' } +#' +#' @export +available.models = function() { + json.data = jsonlite::fromJSON("https://genai.gd.edu.kg/model.json") + return (json.data) +} diff --git a/R/src/R/chat.R b/R/src/R/chat.R new file mode 100644 index 0000000..25a7d68 --- /dev/null +++ b/R/src/R/chat.R @@ -0,0 +1,211 @@ +#' Chat Generation with Text as the Input +#' +#' This function establishes a connection to a generative AI model through a generative AI object. +#' It generates a chat response based on the provided prompt and stores it in the chat history along +#' with the generative AI object. +#' +#' @param genai.object A generative AI object containing necessary and correct information. +#' @param prompt A character string representing the query for chat generation. +#' @param verbose Optional. Default to \code{FALSE}. A boolean value determining whether or not to print +#' out the details of the chat request. +#' @param config Optional. Default to \code{list()}. A list of configuration parameters for chat generation. +#' +#' @return If successful, the most recent chat response will be returned. If the API response indicates +#' an error, the function halts execution and provides an error message. +#' +#' @details Providing accurate and valid information for each argument is crucial for successful chat +#' generation by the generative AI model. If any parameter is incorrect, the function responds with an +#' error message based on the API feedback. To view all supported generative AI models, use the +#' function \code{\link{available.models}}. +#' +#' In addition, this function modifies the chat history along with the generative AI object directly, +#' meaning the chat history is mutable. You can print out the chat history using the +#' function \code{\link{chat.history.print}} or simply use \code{verbose = TRUE} in this function. If you +#' want to edit a message, use the function \code{\link{chat.edit}}. To reset the chat history along with +#' the generative AI object, use the function \code{\link{chat.history.reset}}. +#' +#' For \strong{Google Generative AI} models, available configurations are as follows. For more detail, +#' please refer +#' to \code{https://ai.google.dev/api/rest/v1/HarmCategory}, +#' \code{https://ai.google.dev/api/rest/v1/SafetySetting}, and +#' \code{https://ai.google.dev/api/rest/v1/GenerationConfig}. +#' +#' \itemize{ +#' \item \code{harm.category.dangerous.content} +#' +#' Optional. An integer, from 1 to 5 inclusive, representing the threshold for dangerous content, +#' with a higher value representing a lower probability of being blocked. +#' +#' \item \code{harm.category.harassment} +#' +#' Optional. An integer, from 1 to 5 inclusive, representing the threshold for harassment content, +#' with a higher value representing a lower probability of being blocked. +#' +#' \item \code{harm.category.hate.speech} +#' +#' Optional. An integer, from 1 to 5 inclusive, representing the threshold for hate speech +#' content, with a higher value representing a lower probability of being blocked. +#' +#' \item \code{harm.category.sexually.explicit} +#' +#' Optional. An integer, from 1 to 5 inclusive, representing the threshold for sexually explicit +#' content, with a higher value representing a lower probability of being blocked. +#' +#' \item \code{stop.sequences} +#' +#' Optional. A list of character sequences (up to 5) that will stop output generation. If specified, +#' the API will stop at the first appearance of a stop sequence.
The stop sequence will not be +#' included as part of the response. +#' +#' \item \code{max.output.tokens} +#' +#' Optional. An integer, value varies by model, representing the maximum number of tokens to include +#' in a candidate. +#' +#' \item \code{temperature} +#' +#' Optional. A number, from 0.0 to 1.0 inclusive, controlling the randomness of the output. +#' +#' \item \code{top.p} +#' +#' Optional. A number, value varies by model, representing the maximum cumulative probability of tokens +#' to consider when sampling. +#' +#' \item \code{top.k} +#' +#' Optional. A number, value varies by model, representing the maximum number of tokens to consider when sampling. +#' } +#' +#' For \strong{Moonshot AI} models, available configurations are as follows. For more detail, please refer to +#' \code{https://platform.moonshot.cn/api.html#chat-completion}. +#' +#' \itemize{ +#' \item \code{max.tokens} +#' +#' Optional. An integer. The maximum number of tokens that will be generated when the chat completes. +#' If the chat is not finished by the maximum number of tokens generated, the finish reason will be +#' "length", otherwise it will be "stop". +#' +#' \item \code{temperature} +#' +#' Optional. A number. What sampling temperature to use, between 0 and 1. Higher values (e.g. 0.7) will +#' make the output more random, while lower values (e.g. 0.2) will make it more focused and deterministic. +#' +#' \item \code{top.p} +#' +#' Optional. A number. An alternative to sampling with temperature, known as nucleus sampling. +#' } +#' +#' For \strong{OpenAI} models, available configurations are as follows. For more detail, please refer to +#' \code{https://platform.openai.com/docs/api-reference/chat/create}. +#' +#' \itemize{ +#' \item \code{frequency.penalty} +#' +#' Optional. A number from -2.0 to 2.0 inclusive. Positive values penalize new tokens based on their +#' existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. +#' +#' \item \code{logit.bias} +#' +#' Optional. A map. Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object +#' that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to +#' 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact +#' effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; +#' values like -100 or 100 should result in a ban or exclusive selection of the relevant token. +#' +#' \item \code{logprobs} +#' +#' Optional. A boolean value. Whether to return log probabilities of the output tokens or not. If true, returns the log +#' probabilities of each output token returned in the content of the message. +#' +#' \item \code{top.logprobs} +#' +#' Optional. An integer between 0 and 5 specifying the number of most likely tokens to return at each token +#' position, each with an associated log probability. \code{logprobs} must be set to \code{TRUE} if this +#' parameter is used. +#' +#' \item \code{max.tokens} +#' +#' Optional. An integer. The maximum number of tokens that can be generated in the chat completion. The total length of +#' input tokens and generated tokens is limited by the model's context length. +#' +#' \item \code{presence.penalty} +#' +#' Optional. A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear +#' in the text so far, increasing the model's likelihood to talk about new topics. +#' +#' \item \code{response.format} +#' +#' Optional.
An object specifying the format that the model must output. Compatible with GPT-4 Turbo and +#' all GPT-3.5 Turbo models newer than \code{gpt-3.5-turbo-1106}. +#' +#' \item \code{seed} +#' +#' Optional. An integer. If specified, our system will make a best effort to sample deterministically, such that repeated +#' requests with the same seed and parameters should return the same result. +#' +#' \item \code{stop} +#' +#' Optional. A character string or list containing up to 4 sequences where the API will stop generating further tokens. +#' +#' \item \code{temperature} +#' +#' Optional. A number. What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output +#' more random, while lower values like 0.2 will make it more focused and deterministic. +#' +#' \item \code{top.p} +#' +#' Optional. A number. An alternative to sampling with temperature, called nucleus sampling, where the model considers +#' the results of the tokens with \code{top.p} probability mass. So 0.1 means only the tokens comprising the top +#' 10% probability mass are considered. +#' +#' \item \code{tools} +#' +#' Optional. A list of tools the model may call. Currently, only functions are supported as a tool. Use this +#' to provide a list of functions the model may generate JSON inputs for. +#' +#' \item \code{tool.choice} +#' +#' Optional. A character string or object. Controls which (if any) function is called by the model. \code{none} means +#' the model will not call a function and instead generates a message. \code{auto} means the model can pick +#' between generating a message or calling a function. +#' +#' \item \code{user} +#' +#' Optional. A character string. A unique identifier representing your end-user, which can help OpenAI to monitor +#' and detect abuse. +#' } +#' +#' @seealso +#' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} +#' +#' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat.ipynb}{Live Demo in Colab} +#' +#' @examples +#' \dontrun{ +#' # Assuming there is a GenAI object named 'genai.model' supporting this +#' # function, please refer to the "Live Demo in Colab" above for real +#' # examples. The following examples are just some basic guidelines. +#' +#' # Method 1 (recommended): use the pipe operator "%>%" +#' genai.model %>% +#' chat(prompt = "Write a story about Mars in 50 words.") %>% +#' cat() +#' +#' # Method 2: use the reference operator "$" +#' cat(genai.model$chat(prompt = "Write a story about Jupiter in 50 words.")) +#' +#' # Method 3: use the function chat() directly +#' cat(chat(genai.object = genai.model, +#' prompt = "Summarize the chat.")) +#' } +#' +#' @export +chat = function(genai.object, + prompt, + verbose = FALSE, + config = list()) { + genai.object$chat(prompt, + verbose, + config) +} diff --git a/R/src/R/chat.edit.R b/R/src/R/chat.edit.R new file mode 100644 index 0000000..a92dda2 --- /dev/null +++ b/R/src/R/chat.edit.R @@ -0,0 +1,218 @@ +#' Chat Edit with New Text as the Input +#' +#' This function establishes a connection to a generative AI model through a generative AI object. +#' It generates a chat response based on the new prompt and stores it in the chat history along +#' with the generative AI object. +#' +#' @param genai.object A generative AI object containing necessary and correct information. +#' @param prompt A character string representing the query for chat generation.
+#' @param message.to.edit An integer representing the index of the message to be edited. +#' @param verbose Optional. Default to \code{FALSE}. A boolean value determining whether or not to print +#' out the details of the chat request. +#' @param config Optional. Default to \code{list()}. A list of configuration parameters for chat generation. +#' +#' @return If successful, the most recent chat response will be returned. If the API response indicates +#' an error, the function halts execution and provides an error message. +#' +#' @details Providing accurate and valid information for each argument is crucial for successful chat +#' generation by the generative AI model. If any parameter is incorrect, the function responds with an +#' error message based on the API feedback. To view all supported generative AI models, use the +#' function \code{\link{available.models}}. +#' +#' In addition, this function modifies the chat history along with the generative AI object directly, +#' meaning the chat history is mutable. You can print out the chat history using the +#' function \code{\link{chat.history.print}} or simply use \code{verbose = TRUE} in this function. To reset the chat history along with +#' the generative AI object, use the function \code{\link{chat.history.reset}}. +#' +#' For \strong{Google Generative AI} models, available configurations are as follows. For more detail, +#' please refer +#' to \code{https://ai.google.dev/api/rest/v1/HarmCategory}, +#' \code{https://ai.google.dev/api/rest/v1/SafetySetting}, and +#' \code{https://ai.google.dev/api/rest/v1/GenerationConfig}. +#' +#' \itemize{ +#' \item \code{harm.category.dangerous.content} +#' +#' Optional. An integer, from 1 to 5 inclusive, representing the threshold for dangerous content, +#' with a higher value representing a lower probability of being blocked. +#' +#' \item \code{harm.category.harassment} +#' +#' Optional. An integer, from 1 to 5 inclusive, representing the threshold for harassment content, +#' with a higher value representing a lower probability of being blocked. +#' +#' \item \code{harm.category.hate.speech} +#' +#' Optional. An integer, from 1 to 5 inclusive, representing the threshold for hate speech +#' content, with a higher value representing a lower probability of being blocked. +#' +#' \item \code{harm.category.sexually.explicit} +#' +#' Optional. An integer, from 1 to 5 inclusive, representing the threshold for sexually explicit +#' content, with a higher value representing a lower probability of being blocked. +#' +#' \item \code{stop.sequences} +#' +#' Optional. A list of character sequences (up to 5) that will stop output generation. If specified, +#' the API will stop at the first appearance of a stop sequence. The stop sequence will not be +#' included as part of the response. +#' +#' \item \code{max.output.tokens} +#' +#' Optional. An integer, value varies by model, representing the maximum number of tokens to include +#' in a candidate. +#' +#' \item \code{temperature} +#' +#' Optional. A number, from 0.0 to 1.0 inclusive, controlling the randomness of the output. +#' +#' \item \code{top.p} +#' +#' Optional. A number, value varies by model, representing the maximum cumulative probability of tokens +#' to consider when sampling. +#' +#' \item \code{top.k} +#' +#' Optional. A number, value varies by model, representing the maximum number of tokens to consider when sampling. +#' } +#' +#' For \strong{Moonshot AI} models, available configurations are as follows.
For more detail, please refer to +#' \code{https://platform.moonshot.cn/api.html#chat-completion}. +#' +#' \itemize{ +#' \item \code{max.tokens} +#' +#' Optional. An integer. The maximum number of tokens that will be generated when the chat completes. +#' If the chat is not finished by the maximum number of tokens generated, the finish reason will be +#' "length", otherwise it will be "stop". +#' +#' \item \code{temperature} +#' +#' Optional. A number. What sampling temperature to use, between 0 and 1. Higher values (e.g. 0.7) will +#' make the output more random, while lower values (e.g. 0.2) will make it more focused and deterministic. +#' +#' \item \code{top.p} +#' +#' Optional. A number. An alternative to sampling with temperature, known as nucleus sampling. +#' } +#' +#' For \strong{OpenAI} models, available configurations are as follows. For more detail, please refer to +#' \code{https://platform.openai.com/docs/api-reference/chat/create}. +#' +#' \itemize{ +#' \item \code{frequency.penalty} +#' +#' Optional. A number from -2.0 to 2.0 inclusive. Positive values penalize new tokens based on their +#' existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. +#' +#' \item \code{logit.bias} +#' +#' Optional. A map. Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object +#' that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to +#' 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact +#' effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; +#' values like -100 or 100 should result in a ban or exclusive selection of the relevant token. +#' +#' \item \code{logprobs} +#' +#' Optional. A boolean value. Whether to return log probabilities of the output tokens or not. If true, returns the log +#' probabilities of each output token returned in the content of the message. +#' +#' \item \code{top.logprobs} +#' +#' Optional. An integer between 0 and 5 specifying the number of most likely tokens to return at each token +#' position, each with an associated log probability. \code{logprobs} must be set to \code{TRUE} if this +#' parameter is used. +#' +#' \item \code{max.tokens} +#' +#' Optional. An integer. The maximum number of tokens that can be generated in the chat completion. The total length of +#' input tokens and generated tokens is limited by the model's context length. +#' +#' \item \code{presence.penalty} +#' +#' Optional. A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear +#' in the text so far, increasing the model's likelihood to talk about new topics. +#' +#' \item \code{response.format} +#' +#' Optional. An object specifying the format that the model must output. Compatible with GPT-4 Turbo and +#' all GPT-3.5 Turbo models newer than \code{gpt-3.5-turbo-1106}. +#' +#' \item \code{seed} +#' +#' Optional. An integer. If specified, our system will make a best effort to sample deterministically, such that repeated +#' requests with the same seed and parameters should return the same result. +#' +#' \item \code{stop} +#' +#' Optional. A character string or list containing up to 4 sequences where the API will stop generating further tokens. +#' +#' \item \code{temperature} +#' +#' Optional. A number. What sampling temperature to use, between 0 and 2.
Higher values like 0.8 will make the output +#' more random, while lower values like 0.2 will make it more focused and deterministic. +#' +#' \item \code{top.p} +#' +#' Optional. A number. An alternative to sampling with temperature, called nucleus sampling, where the model considers +#' the results of the tokens with \code{top.p} probability mass. So 0.1 means only the tokens comprising the top +#' 10% probability mass are considered. +#' +#' \item \code{tools} +#' +#' Optional. A list of tools the model may call. Currently, only functions are supported as a tool. Use this +#' to provide a list of functions the model may generate JSON inputs for. +#' +#' \item \code{tool.choice} +#' +#' Optional. A character string or object. Controls which (if any) function is called by the model. \code{none} means +#' the model will not call a function and instead generates a message. \code{auto} means the model can pick +#' between generating a message or calling a function. +#' +#' \item \code{user} +#' +#' Optional. A character string. A unique identifier representing your end-user, which can help OpenAI to monitor +#' and detect abuse. +#' } +#' +#' @seealso +#' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} +#' +#' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_edit.ipynb}{Live Demo in Colab} +#' +#' @examples +#' \dontrun{ +#' # Assuming there is a GenAI object named 'genai.model' supporting this +#' # function, please refer to the "Live Demo in Colab" above for real +#' # examples. The following examples are just some basic guidelines. +#' +#' # Method 1 (recommended): use the pipe operator "%>%" +#' genai.model %>% +#' chat.edit(prompt = "What is XGBoost?", +#' message.to.edit = 5, +#' verbose = TRUE, +#' config = parameters) %>% +#' cat() +#' +#' # Method 2: use the reference operator "$" +#' cat(genai.model$chat.edit(prompt = "What is CatBoost?", +#' message.to.edit = 3)) +#' +#' # Method 3: use the function chat.edit() directly +#' cat(chat.edit(genai.object = genai.model, +#' prompt = "What is LightGBM?", +#' message.to.edit = 1)) +#' } +#' +#' @export +chat.edit = function(genai.object, + prompt, + message.to.edit, + verbose = FALSE, + config = list()) { + genai.object$chat.edit(prompt, + message.to.edit, + verbose, + config) +} diff --git a/R/src/R/chat.history.convert.R b/R/src/R/chat.history.convert.R new file mode 100644 index 0000000..8a06aad --- /dev/null +++ b/R/src/R/chat.history.convert.R @@ -0,0 +1,45 @@ +#' Chat History Convert +#' +#' This function converts the chat history along with a generative AI object to a valid format +#' for another generative AI object. +#' +#' @param from.genai.object A source generative AI object containing necessary and correct information. +#' @param to.genai.object A target generative AI object containing necessary and correct information. +#' +#' @return If successful, the converted chat history list will be returned. +#' +#' @details Providing accurate and valid information for each argument is crucial for successful chat +#' generation by the generative AI model. If any parameter is incorrect, the function responds with an +#' error message based on the API feedback. To view all supported generative AI models, use the +#' function \code{\link{available.models}}. Moreover, you can print out the chat history using the +#' function \code{\link{chat.history.print}} or simply use \code{verbose = TRUE} during the chat. 
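+#' +#' As a sketch of what the conversion produces, assuming a conversion from a +#' Google object to an OpenAI object (the system message shown below is the one +#' prepended by the package): +#' +#' \preformatted{ +#' # Google format (source): +#' # list(role = "model", parts = list(text = "Hello!")) +#' # OpenAI format (converted): +#' # list(role = "assistant", content = "Hello!") +#' # Prepended system message: +#' # list(role = "system", content = "You are a helpful assistant.") +#' }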
+#' +#' @seealso +#' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} +#' +#' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_convert.ipynb}{Live Demo in Colab} +#' +#' @examples +#' \dontrun{ +#' # Assuming there are two GenAI objects named 'genai.model' and 'another.genai.model' +#' # supporting this function, please refer to the "Live Demo in Colab" above for +#' # real examples. The following examples are just some basic guidelines. +#' +#' # Method 1 (recommended): use the pipe operator "%>%" +#' converted.history = genai.model %>% +#' chat.history.convert(to.genai.object = another.genai.model) +#' +#' # Method 2: use the reference operator "$" +#' converted.history = genai.model$chat.history.convert(to.genai.object = another.genai.model) +#' +#' # Method 3: use the function chat.history.convert() directly +#' converted.history = chat.history.convert(from.genai.object = genai.model, +#' to.genai.object = another.genai.model) +#' } +#' +#' @export +chat.history.convert = function(from.genai.object, + to.genai.object) { + from.genai.object$chat.history.convert(from.genai.object, + to.genai.object) } diff --git a/R/src/R/chat.history.export.R b/R/src/R/chat.history.export.R new file mode 100644 index 0000000..4b9f6b0 --- /dev/null +++ b/R/src/R/chat.history.export.R @@ -0,0 +1,39 @@ +#' Chat History Export +#' +#' This function exports the chat history along with a generative AI object as a list. +#' +#' @param genai.object A generative AI object containing necessary and correct information. +#' +#' @return If successful, the chat history list will be returned. +#' +#' @details Providing accurate and valid information for each argument is crucial for successful chat +#' generation by the generative AI model. If any parameter is incorrect, the function responds with an +#' error message based on the API feedback. To view all supported generative AI models, use the +#' function \code{\link{available.models}}. +#' +#' @seealso +#' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} +#' +#' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_export.ipynb}{Live Demo in Colab} +#' +#' @examples +#' \dontrun{ +#' # Assuming there is a GenAI object named 'genai.model' supporting this +#' # function, please refer to the "Live Demo in Colab" above for real +#' # examples. The following examples are just some basic guidelines. +#' +#' # Method 1 (recommended): use the pipe operator "%>%" +#' exported.history = genai.model %>% +#' chat.history.export() +#' +#' # Method 2: use the reference operator "$" +#' exported.history = genai.model$chat.history.export() +#' +#' # Method 3: use the function chat.history.export() directly +#' exported.history = chat.history.export(genai.object = genai.model) +#' } +#' +#' @export +chat.history.export = function(genai.object) { + genai.object$chat.history.export() +} diff --git a/R/src/R/chat.history.import.R b/R/src/R/chat.history.import.R new file mode 100644 index 0000000..3f3e1ae --- /dev/null +++ b/R/src/R/chat.history.import.R @@ -0,0 +1,41 @@ +#' Chat History Import +#' +#' This function imports a chat history in list format to a generative AI object. +#' +#' @param genai.object A generative AI object containing necessary and correct information. +#' @param new.chat.history A list containing a chat history in the correct format.
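+#' For a Google generative AI object, for example, each message is expected to +#' follow the form \code{list(role = "user", parts = list(text = "..."))} or +#' \code{list(role = "model", parts = list(text = "..."))}, and the history is a +#' list of such messages; a minimal sketch of a valid history (with illustrative +#' message texts) is +#' \code{list(list(role = "user", parts = list(text = "Hi")), +#' list(role = "model", parts = list(text = "Hello! How can I help?")))}.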
+#' +#' @details Providing accurate and valid information for each argument is crucial for successful chat +#' generation by the generative AI model. If any parameter is incorrect, the function responds with an +#' error message based on the API feedback. To view all supported generative AI models, use the +#' function \code{\link{available.models}}. +#' +#' @seealso +#' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} +#' +#' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_import.ipynb}{Live Demo in Colab} +#' +#' @examples +#' \dontrun{ +#' # Assuming there is a GenAI object named 'genai.model' supporting this +#' # function and a valid chat history list named 'new.history', please +#' # refer to the "Live Demo in Colab" above for real examples. The +#' # following examples are just some basic guidelines. +#' +#' # Method 1 (recommended): use the pipe operator "%>%" +#' genai.model %>% +#' chat.history.import(new.chat.history = new.history) +#' +#' # Method 2: use the reference operator "$" +#' genai.model$chat.history.import(new.chat.history = new.history) +#' +#' # Method 3: use the function chat.history.import() directly +#' chat.history.import(genai.object = genai.model, +#' new.chat.history = new.history) +#' } +#' +#' @export +chat.history.import = function(genai.object, + new.chat.history) { + genai.object$chat.history.import(new.chat.history) +} diff --git a/R/src/R/chat.history.print.R b/R/src/R/chat.history.print.R new file mode 100644 index 0000000..d7df21f --- /dev/null +++ b/R/src/R/chat.history.print.R @@ -0,0 +1,45 @@ +#' Chat History Print +#' +#' This function prints out the chat history along with a generative AI object. +#' +#' @param genai.object A generative AI object containing necessary and correct information. +#' @param from Optional. Default to 1. An integer representing the first message in the chat history that needs +#' to be printed. +#' @param to Optional. Default to \code{NULL}, prints until the last message in the chat history. An integer +#' representing the last message in the chat history that needs to be printed. +#' +#' @details Providing accurate and valid information for each argument is crucial for successful chat +#' generation by the generative AI model. If any parameter is incorrect, the function responds with an +#' error message based on the API feedback. To view all supported generative AI models, use the +#' function \code{\link{available.models}}. +#' +#' @seealso +#' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} +#' +#' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_print.ipynb}{Live Demo in Colab} +#' +#' @examples +#' \dontrun{ +#' # Assuming there is a GenAI object named 'genai.model' supporting this +#' # function, please refer to the "Live Demo in Colab" above for real +#' # examples. The following examples are just some basic guidelines. 
+#' +#' # Method 1 (recommended): use the pipe operator "%>%" +#' genai.model %>% +#' chat.history.print() +#' +#' # Method 2: use the reference operator "$" +#' genai.model$chat.history.print(from = 3) +#' +#' # Method 3: use the function chat.history.print() directly +#' chat.history.print(genai.object = genai.model, +#' from = 3, +#' to = 5) +#' } +#' +#' @export +chat.history.print = function(genai.object, + from = 1, + to = NULL) { + genai.object$chat.history.print(from, to) +} diff --git a/R/src/R/chat.history.reset.R b/R/src/R/chat.history.reset.R new file mode 100644 index 0000000..82f5d8d --- /dev/null +++ b/R/src/R/chat.history.reset.R @@ -0,0 +1,37 @@ +#' Chat History Reset +#' +#' This function resets the chat history along with a generative AI object. +#' +#' @param genai.object A generative AI object containing necessary and correct information. +#' +#' @details Providing accurate and valid information for each argument is crucial for successful chat +#' generation by the generative AI model. If any parameter is incorrect, the function responds with an +#' error message based on the API feedback. To view all supported generative AI models, use the +#' function \code{\link{available.models}}. +#' +#' @seealso +#' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} +#' +#' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_reset.ipynb}{Live Demo in Colab} +#' +#' @examples +#' \dontrun{ +#' # Assuming there is a GenAI object named 'genai.model' supporting this +#' # function, please refer to the "Live Demo in Colab" above for real +#' # examples. The following examples are just some basic guidelines. +#' +#' # Method 1 (recommended): use the pipe operator "%>%" +#' genai.model %>% +#' chat.history.reset() +#' +#' # Method 2: use the reference operator "$" +#' genai.model$chat.history.reset() +#' +#' # Method 3: use the function chat.history.reset() directly +#' chat.history.reset(genai.object = genai.model) +#' } +#' +#' @export +chat.history.reset = function(genai.object) { + genai.object$chat.history.reset() +} diff --git a/R/src/R/chat.history.save.R b/R/src/R/chat.history.save.R new file mode 100644 index 0000000..b025869 --- /dev/null +++ b/R/src/R/chat.history.save.R @@ -0,0 +1,42 @@ +#' Chat History Save +#' +#' This function saves a chat history along with a generative AI object as a JSON file. +#' +#' @param genai.object A generative AI object containing necessary and correct information. +#' @param file.name A character string representing the name of the JSON file for the chat history. +#' +#' @return If successful, the chat history will be saved as a JSON file in your current or specified +#' directory. +#' +#' @details Providing accurate and valid information for each argument is crucial for successful chat +#' generation by the generative AI model. If any parameter is incorrect, the function responds with an +#' error message based on the API feedback. To view all supported generative AI models, use the +#' function \code{\link{available.models}}. 
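+#' +#' The chat history is written as pretty-printed JSON to a file named +#' \code{paste0(file.name, ".json")}, so \code{file.name} should not include the +#' \code{.json} extension; it may include a relative or absolute path. A minimal +#' sketch (the file name is illustrative, and any directory in the path is +#' assumed to exist already): +#' +#' \preformatted{ +#' chat.history.save(genai.object = genai.model, +#' file.name = "history_2024_02_15") +#' # creates "history_2024_02_15.json" in the current working directory +#' }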
+#' +#' @seealso +#' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} +#' +#' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_save.ipynb}{Live Demo in Colab} +#' +#' @examples +#' \dontrun{ +#' # Assuming there is a GenAI object named 'genai.model' supporting this +#' # function, please refer to the "Live Demo in Colab" above for real +#' # examples. The following examples are just some basic guidelines. +#' +#' # Method 1 (recommended): use the pipe operator "%>%" +#' genai.model %>% +#' chat.history.save(file.name = "saved_history") +#' +#' # Method 2: use the reference operator "$" +#' genai.model$chat.history.save(file.name = "saved_history") +#' +#' # Method 3: use the function chat.history.save() directly +#' chat.history.save(genai.object = genai.model, +#' file.name = "saved_history") +#' } +#' +#' @export +chat.history.save = function(genai.object, file.name) { + genai.object$chat.history.save(file.name) +} diff --git a/R/src/R/genai.google.R b/R/src/R/genai.google.R new file mode 100644 index 0000000..3a7a707 --- /dev/null +++ b/R/src/R/genai.google.R @@ -0,0 +1,63 @@ +#' Google Generative AI Object Creation +#' +#' This function establishes a connection to a Google generative AI model by providing essential +#' parameters. +#' +#' @param api A character string representing the API key required for accessing the model. +#' @param model A character string representing the specific model. +#' @param version A character string representing the version of the chosen model. +#' @param proxy Optional. Default to \code{FALSE}. A boolean value indicating whether to use a +#' proxy for accessing the API URL. If your local internet cannot access the API, set this +#' parameter to \code{TRUE}. +#' +#' @return If successful, the function returns a Google generative AI object. If the API response +#' indicates an error, the function halts execution and provides an error message. +#' +#' @details Providing accurate and valid information for each argument is crucial for successful text +#' generation by the generative AI model. If any parameter is incorrect, the function responds with an +#' error message based on the API feedback. To view all supported generative AI models, use the +#' function \code{\link{available.models}}. +#' +#' Please refer to \code{https://ai.google.dev/tutorials/setup} for the API key. +#' +#' The API proxy service is designed to address the needs of users who hold a valid API key but find +#' themselves outside their home countries or regions due to reasons such as travel, work, or study +#' in locations that may not be covered by certain Generative AI service providers. +#' +#' Please be aware that although GenAI and its affiliated organization - GitData - do not gather user +#' information through this service, the server providers for GenAI API proxy service and the Generative +#' AI service providers may engage in such data collection. Furthermore, the proxy service cannot +#' guarantee a consistent connection speed. Users are strongly encouraged to utilize this service +#' with caution and at their own discretion. 
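+#' +#' When \code{proxy = TRUE}, requests are routed through +#' \code{https://api.genai.gd.edu.kg/google/} instead of +#' \code{https://generativelanguage.googleapis.com/}; the API key, model, and +#' request body are unchanged. A minimal sketch (the model and version strings +#' are illustrative; use \code{available.models()} to look up current values): +#' +#' \preformatted{ +#' google.proxy = genai.google(api = Sys.getenv("GOOGLE_API"), +#' model = "gemini-pro", +#' version = "v1", +#' proxy = TRUE) +#' }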
+#' +#' @seealso +#' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} +#' +#' \href{https://genai.gd.edu.kg/api/}{GenAI - Generative Artificial Intelligence API Proxy Service} +#' +#' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/genai_google.ipynb}{Live Demo in Colab} +#' +#' @examples +#' \dontrun{ +#' # Please change YOUR_GOOGLE_API to your own API key of Google Generative AI +#' Sys.setenv(GOOGLE_API = "YOUR_GOOGLE_API") +#' +#' all.models = available.models() %>% print() +#' +#' # Create a Google Generative AI object +#' google = genai.google(api = Sys.getenv("GOOGLE_API"), +#' model = all.models$google$model[1], +#' version = all.models$google$version[1], +#' proxy = FALSE) +#' } +#' +#' @export +genai.google = function(api, + model, + version, + proxy = FALSE) { + return (genai.google.class$new(api, + model, + version, + proxy)) +} diff --git a/R/src/R/genai.google.chat.R b/R/src/R/genai.google.chat.R new file mode 100644 index 0000000..7b584d5 --- /dev/null +++ b/R/src/R/genai.google.chat.R @@ -0,0 +1,104 @@ +#' @noRd +genai.google.chat = function(genai.google.object, + prompt, + verbose, + config = list( + harm.category.dangerous.content = NULL, + harm.category.harassment = NULL, + harm.category.hate.speech = NULL, + harm.category.sexually.explicit = NULL, + stop.sequences = NULL, + max.output.tokens = NULL, + temperature = NULL, + top.p = NULL, + top.k = NULL + )) { + # Check configurations + genai.google.config.check(config) + + # Get api url + api.url = paste0( + "https://generativelanguage.googleapis.com/", + genai.google.object$version, + "/models/", + genai.google.object$model, + ":generateContent?key=", + genai.google.object$api + ) + if (genai.google.object$proxy) { + api.url = paste0( + "https://api.genai.gd.edu.kg/google/", + genai.google.object$version, + "/models/", + genai.google.object$model, + ":generateContent?key=", + genai.google.object$api + ) + } + + # Initialize the request body + requestNewContent = list(list(role = "user", + parts = list(text = prompt))) + requestBody = as.list(genai.google.object$chat.history) + requestBody$contents = append(requestBody$contents, requestNewContent) + + # Get the safety settings + safety.setting = genai.google.safety.setting(config) + if (length(safety.setting) > 0) { + requestBody$safetySettings = safety.setting + } + + # Get the generation configuration + generation.config = genai.google.generation.config(config) + if (length(generation.config) > 0) { + requestBody$generationConfig = generation.config + } + + # Convert the request as JSON format + requestBodyJSON = jsonlite::toJSON(requestBody, + auto_unbox = TRUE, + pretty = TRUE) + + # Send request and get response + response = httr::POST( + url = api.url, + body = requestBodyJSON, + httr::add_headers("Content-Type" = "application/json") + ) + responseJSON = httr::content(response, "parsed") + + # Check for harmful prompt + if (!is.null(responseJSON$promptFeedback$blockReason)) { + stop("Invalid prompt. 
The prompt may contain harmful content.") + } + + # Check for response error + if (!is.null(responseJSON$error)) { + stop(responseJSON$error$message) + } + + # Save the most recent prompt to the chat history + genai.google.object$chat.history$contents = append(genai.google.object$chat.history$contents, + requestNewContent) + + # Save the most recent model response to the chat history + respondContent = list(list( + role = "model", + parts = list(text = responseJSON$candidates[[1]]$content$parts[[1]]$text) + )) + genai.google.object$chat.history$contents = append(genai.google.object$chat.history$contents, + respondContent) + + # Print detail if verbose is TRUE + if (verbose) { + genai.google.formated.confguration(requestBody, prompt) + cat("=============================================================================\n") + cat(" Chat history \n") + cat("-----------------------------------------------------------------------------\n\n") + genai.google.chat.history.print(genai.google.object, from = 1, to = NULL) + cat("=============================================================================\n\n\n\n") + } + + # Get the response text + return (responseJSON$candidates[[1]]$content$parts[[1]]$text) +} diff --git a/R/src/R/genai.google.chat.edit.R b/R/src/R/genai.google.chat.edit.R new file mode 100644 index 0000000..b615c65 --- /dev/null +++ b/R/src/R/genai.google.chat.edit.R @@ -0,0 +1,126 @@ +#' @noRd +genai.google.chat.edit = function(genai.google.object, + prompt, + message.to.edit, + verbose, + config = list( + harm.category.dangerous.content = NULL, + harm.category.harassment = NULL, + harm.category.hate.speech = NULL, + harm.category.sexually.explicit = NULL, + stop.sequences = NULL, + max.output.tokens = NULL, + temperature = NULL, + top.p = NULL, + top.k = NULL + )) { + # Check if there are messages in the chat history + if (length(genai.google.object$chat.history$contents) == 0) { + stop("Invalid chat.history. The chat history is empty.") + } + + # Check message.to.edit with chat.history length + if (message.to.edit > length(genai.google.object$chat.history$contents) || + message.to.edit < 1) { + stop( + "Invalid value for message.to.edit. You can only edit existing messages. Please use 'chat.history.print()' to review the formatted chat history." + ) + } + + # Check message.to.edit (must be an odd number) + if (message.to.edit %% 2 == 0) { + stop( + "Invalid value for message.to.edit. You can only edit messages sent by a user role. Please use 'chat.history.print()' to review the formatted chat history." 
+ ) + } + + # Check configurations + genai.google.config.check(config) + + # Get api url + api.url = paste0( + "https://generativelanguage.googleapis.com/", + genai.google.object$version, + "/models/", + genai.google.object$model, + ":generateContent?key=", + genai.google.object$api + ) + if (genai.google.object$proxy) { + api.url = paste0( + "https://api.genai.gd.edu.kg/google/", + genai.google.object$version, + "/models/", + genai.google.object$model, + ":generateContent?key=", + genai.google.object$api + ) + } + + # Initialize the request body + requestNewContent = list(list(role = "user", + parts = list(text = prompt))) + requestBody = as.list(genai.google.object$chat.history) + requestBody$contents = append(requestBody$contents[1:message.to.edit - 1], + requestNewContent) + + # Get the safety settings + safety.setting = genai.google.safety.setting(config) + if (length(safety.setting) > 0) { + requestBody$safetySettings = safety.setting + } + + # Get the generation configuration + generation.config = genai.google.generation.config(config) + if (length(generation.config) > 0) { + requestBody$generationConfig = generation.config + } + + # Convert the request as JSON format + requestBodyJSON = jsonlite::toJSON(requestBody, + auto_unbox = TRUE, + pretty = TRUE) + + # Send request and get response + response = httr::POST( + url = api.url, + body = requestBodyJSON, + httr::add_headers("Content-Type" = "application/json") + ) + responseJSON = httr::content(response, "parsed") + + # Check for harmful prompt + if (!is.null(responseJSON$promptFeedback$blockReason)) { + stop("Invalid prompt. The prompt may contain harmful content.") + } + + # Check for response error + if (!is.null(responseJSON$error)) { + stop(responseJSON$error$message) + } + + # Save the most recent prompt to the chat history + genai.google.object$chat.history$contents = append(genai.google.object$chat.history$contents[1:message.to.edit - 1], + requestNewContent) + + # Save the most recent model response to the chat history + respondContent = list(list( + role = "model", + parts = list(text = responseJSON$candidates[[1]]$content$parts[[1]]$text) + )) + genai.google.object$chat.history$contents = append(genai.google.object$chat.history$contents, + respondContent) + + # Print detail if verbose is TRUE + if (verbose) { + genai.google.formated.confguration(requestBody, prompt) + cat("=============================================================================\n") + cat(" Chat history \n") + cat("-----------------------------------------------------------------------------\n\n") + genai.google.chat.history.print(genai.google.object, from = 1, to = NULL) + cat("=============================================================================\n\n\n\n") + } + + # Get the response text + return (responseJSON$candidates[[1]]$content$parts[[1]]$text) +} diff --git a/R/src/R/genai.google.chat.history.convert.R b/R/src/R/genai.google.chat.history.convert.R new file mode 100644 index 0000000..c8605a2 --- /dev/null +++ b/R/src/R/genai.google.chat.history.convert.R @@ -0,0 +1,30 @@ +#' @noRd +genai.google.chat.history.convert = function(from.genai.google.object, + to.genai.object) { + if (class(to.genai.object)[1] == "genai.openai") { + system.message = list(role = "system", content = "You are a helpful assistant.") + messages = lapply(from.genai.google.object$chat.history$contents, function(entry) { + list( + role = ifelse(entry$role == "model", "assistant", "user"), + content = entry$parts$text + ) + }) + openai.history = 
c(list(system.message), messages) + return(openai.history) + } + else if (class(to.genai.object)[1] == "genai.moonshot") { + system.message = list(role = "system", content = "You are Kimi, an Artificial Intelligence Assistant powered by Moonshot AI, and you are better at conversations in Chinese and English. You will provide users with safe, helpful and accurate answers. At the same time, you will reject answers to questions about terrorism, racism, pornography, etc. Moonshot AI is a proper noun and cannot be translated into other languages.") + messages = lapply(from.genai.google.object$chat.history$contents, function(entry) { + list( + role = ifelse(entry$role == "model", "assistant", "user"), + content = entry$parts$text + ) + }) + moonshot.history = c(list(system.message), messages) + return(moonshot.history) + } + else { + stop("Invalid value for to.genai.object.") + } +} + diff --git a/R/src/R/genai.google.chat.history.export.R b/R/src/R/genai.google.chat.history.export.R new file mode 100644 index 0000000..33c27d1 --- /dev/null +++ b/R/src/R/genai.google.chat.history.export.R @@ -0,0 +1,4 @@ +#' @noRd +genai.google.chat.history.export = function(genai.google.object) { + return (genai.google.object$chat.history$contents) +} diff --git a/R/src/R/genai.google.chat.history.import.R b/R/src/R/genai.google.chat.history.import.R new file mode 100644 index 0000000..2f952dd --- /dev/null +++ b/R/src/R/genai.google.chat.history.import.R @@ -0,0 +1,26 @@ +#' @noRd +genai.google.chat.history.import = function(genai.google.object, + new.chat.history) { + # Imported chat history is a list + if (is.list(new.chat.history)) { + expected.format = list( + role = NA, + parts = list( + text = NA + ) + ) + for (message in new.chat.history) { + if (!identical(names(message), names(expected.format)) || + !is.character(message$role) || + !is.list(message$parts) || + length(message$parts) != 1 || + !is.character(message$parts$text)) { + stop("Invalid value for new.chat.history. Please make sure the format of the imported chat history is correct.") + } + } + genai.google.object$chat.history$contents = new.chat.history + } + else { + stop("Invalid new.chat.history. Please make sure it is a list.") + } +} diff --git a/R/src/R/genai.google.chat.history.print.R b/R/src/R/genai.google.chat.history.print.R new file mode 100644 index 0000000..c8c7e22 --- /dev/null +++ b/R/src/R/genai.google.chat.history.print.R @@ -0,0 +1,41 @@ +#' @noRd +genai.google.chat.history.print = function(genai.google.object, + from, + to) { + if (!is.numeric(from) || from < 1) { + stop("Invalid value for from. It should be an integer greater than or equal to 1.") + } + + if (is.numeric(to) && to < from) { + stop("Invalid value for to. It should be an integer greater than or equal to from") + } + + chat.length = length(genai.google.object$chat.history$contents) + + if (is.numeric(to) && to > chat.length) { + stop("Invalid value for to. 
It should be an integer less than or equal to ", chat.length, ".") + } + + if (is.numeric(to)) { + chat.length = to + } + + # Print only when the requested range exists; a 'from' beyond the last + # message would otherwise make 'from:chat.length' count backwards + if (chat.length > 0 && from <= chat.length) { + for (i in from:chat.length) { + cat( + sprintf( + "-------------------------------- Message %2d ---------------------------------\n", + i + ) + ) + cat("Role:", + genai.google.object$chat.history$contents[[i]]$role, + "\n") + cat("Text: ") + cat(paste(strwrap(genai.google.object$chat.history$contents[[i]]$parts$text, + width = 76, exdent = 0), collapse = "\n")) + cat("\n\n") + } + } +} + diff --git a/R/src/R/genai.google.chat.history.reset.R b/R/src/R/genai.google.chat.history.reset.R new file mode 100644 index 0000000..33dd8c4 --- /dev/null +++ b/R/src/R/genai.google.chat.history.reset.R @@ -0,0 +1,4 @@ +#' @noRd +genai.google.chat.history.reset = function(genai.google.object) { + genai.google.object$chat.history$contents = list() +} diff --git a/R/src/R/genai.google.chat.history.save.R b/R/src/R/genai.google.chat.history.save.R new file mode 100644 index 0000000..d7bb631 --- /dev/null +++ b/R/src/R/genai.google.chat.history.save.R @@ -0,0 +1,6 @@ +#' @noRd +genai.google.chat.history.save = function(genai.google.object, + file.name) { + write(jsonlite::toJSON(genai.google.object$chat.history$contents, pretty = TRUE), + paste0(file.name, ".json")) +} diff --git a/R/src/R/genai.google.class.R b/R/src/R/genai.google.class.R new file mode 100644 index 0000000..0040dbf --- /dev/null +++ b/R/src/R/genai.google.class.R @@ -0,0 +1,85 @@ +#' @noRd +genai.google.class = R6Class( + classname = "genai.google", + public = list( + # Initialize method + initialize = function(api, model, version, proxy = FALSE) { + genai.google.check(api, model, version, proxy) + private$api = api + private$model = model + private$version = version + private$proxy = proxy + # Create the chat history here so that each instance gets its own + # listenv; a reference object declared in the private field list + # would be shared by all instances + private$chat.history = listenv::listenv(contents = list()) + }, + # Chat generation + chat = function(prompt, + verbose = FALSE, + config = list()) { + genai.google.chat(private, + prompt, + verbose, + config) + }, + # Chat edit + chat.edit = function(prompt, + message.to.edit, + verbose = FALSE, + config = list()) { + genai.google.chat.edit(private, + prompt, + message.to.edit, + verbose, + config) + }, + # Convert chat history + chat.history.convert = function(from.genai.object, to.genai.object) { + genai.google.chat.history.convert(private, to.genai.object) + }, + # Export chat history + chat.history.export = function() { + genai.google.chat.history.export(private) + }, + # Import chat history + chat.history.import = function(new.chat.history) { + genai.google.chat.history.import(private, new.chat.history) + }, + # Print chat history + chat.history.print = function(from = 1, to = NULL) { + genai.google.chat.history.print(private, from, to) + }, + # Reset chat history + chat.history.reset = function() { + genai.google.chat.history.reset(private) + }, + # Save chat history + chat.history.save = function(file.name) { + genai.google.chat.history.save(private, file.name) + }, + # Text generation + txt = function(prompt, + verbose = FALSE, + config = list()) { + genai.google.txt(private, + prompt, + verbose, + config) + }, + # Text generation with image as input + txt.image = function(prompt, + image.path, + verbose = FALSE, + config = list()) { + genai.google.txt.image(private, + prompt, + image.path, + verbose, + config) + } + ), + private = list( + api = NULL, + model = NULL, + version = NULL, + proxy = FALSE, + chat.history = NULL + ) +) diff --git a/R/src/R/genai.google.txt.R b/R/src/R/genai.google.txt.R new file mode 100644 index
0000000..5a7eebc --- /dev/null +++ b/R/src/R/genai.google.txt.R @@ -0,0 +1,85 @@ +#' @noRd +genai.google.txt = function(genai.google.object, + prompt, + verbose, + config = list( + harm.category.dangerous.content = NULL, + harm.category.harassment = NULL, + harm.category.hate.speech = NULL, + harm.category.sexually.explicit = NULL, + stop.sequences = NULL, + max.output.tokens = NULL, + temperature = NULL, + top.p = NULL, + top.k = NULL + )) { + # Check configurations + genai.google.config.check(config) + + # Get api url + api.url = paste0( + "https://generativelanguage.googleapis.com/", + genai.google.object$version, + "/models/", + genai.google.object$model, + ":generateContent?key=", + genai.google.object$api + ) + if (genai.google.object$proxy) { + api.url = paste0( + "https://api.genai.gd.edu.kg/google/", + genai.google.object$version, + "/models/", + genai.google.object$model, + ":generateContent?key=", + genai.google.object$api + ) + } + + # Initialize the request body + requestBody = list(contents = list(parts = list(text = prompt))) + + # Get the safety settings + safety.setting = genai.google.safety.setting(config) + if (length(safety.setting) > 0) { + requestBody$safetySettings = safety.setting + } + + # Get the generation configuration + generation.config = genai.google.generation.config(config) + if (length(generation.config) > 0) { + requestBody$generationConfig = generation.config + } + + # Convert the request as JSON format + requestBodyJSON = jsonlite::toJSON(requestBody, + auto_unbox = TRUE, + pretty = TRUE) + + # Send request and get response + response = httr::POST( + url = api.url, + body = requestBodyJSON, + httr::add_headers("Content-Type" = "application/json") + ) + responseJSON = httr::content(response, "parsed") + + # Check for harmful prompt + if (!is.null(responseJSON$promptFeedback$blockReason)) { + stop("Invalid prompt. 
The prompt may contain harmful content.") + } + + # Check for response error + if (!is.null(responseJSON$error)) { + stop(responseJSON$error$message) + } + + # Print detail if verbose is TRUE + if (verbose) { + genai.google.formated.confguration(requestBody, prompt) + cat("\n") + } + + # Get the response text + return (responseJSON$candidates[[1]]$content$parts[[1]]$text) +} diff --git a/R/src/R/genai.google.txt.image.R b/R/src/R/genai.google.txt.image.R new file mode 100644 index 0000000..c4b72db --- /dev/null +++ b/R/src/R/genai.google.txt.image.R @@ -0,0 +1,104 @@ +#' @noRd +genai.google.txt.image = function(genai.google.object, + prompt, + image.path, + verbose, + config = list( + harm.category.dangerous.content = NULL, + harm.category.harassment = NULL, + harm.category.hate.speech = NULL, + harm.category.sexually.explicit = NULL, + stop.sequences = NULL, + max.output.tokens = NULL, + temperature = NULL, + top.p = NULL, + top.k = NULL + )) { + # Check configurations + genai.google.config.check(config) + + # Get api url + api.url = paste0( + "https://generativelanguage.googleapis.com/", + genai.google.object$version, + "/models/", + genai.google.object$model, + ":generateContent?key=", + genai.google.object$api + ) + if (genai.google.object$proxy) { + api.url = paste0( + "https://api.genai.gd.edu.kg/google/", + genai.google.object$version, + "/models/", + genai.google.object$model, + ":generateContent?key=", + genai.google.object$api + ) + } + + # Convert image to data uri + img.info = image.to.data.uri(image.path) + if (img.info[1] == "jpg") { + img.info[1] = "jpeg" + } + + # Initialize the request body + requestBody = list(contents = list(parts = list( + list(text = prompt), + list(inline_data = list( + mime_type = paste0("image/", img.info[1]), + data = img.info[2] + )) + ))) + + # Get the safety settings + safety.setting = genai.google.safety.setting(config) + if (length(safety.setting) > 0) { + requestBody$safetySettings = safety.setting + } + + # Get the generation configuration + generation.config = genai.google.generation.config(config) + if (length(generation.config) > 0) { + requestBody$generationConfig = generation.config + } + + # Convert the request as JSON format + requestBodyJSON = jsonlite::toJSON(requestBody, + auto_unbox = TRUE, + pretty = TRUE) + + # Send request and get response + response = httr::POST( + url = api.url, + body = requestBodyJSON, + httr::add_headers("Content-Type" = "application/json") + ) + responseJSON = httr::content(response, "parsed") + + # Check for harmful prompt + if (!is.null(responseJSON$promptFeedback$blockReason)) { + stop("Invalid prompt. 
The prompt may contain harmful content.") + } + + # Check for response error + if (!is.null(responseJSON$error)) { + stop(responseJSON$error$message) + } + + # Print detail if verbose is TRUE + if (verbose) { + genai.google.formated.confguration(requestBody, prompt) + cat("=============================================================================\n") + cat(" Image Path\n") + cat("-----------------------------------------------------------------------------\n") + cat(paste(strwrap(image.path, width = 76, exdent = 0), collapse = "\n")) + cat("\n") + cat("=============================================================================\n\n\n\n") + cat("\n") + } + + # Get the response text + return (responseJSON$candidates[[1]]$content$parts[[1]]$text) +} diff --git a/R/src/R/genai.google.utils.R b/R/src/R/genai.google.utils.R new file mode 100644 index 0000000..6ea7183 --- /dev/null +++ b/R/src/R/genai.google.utils.R @@ -0,0 +1,202 @@ +#' @noRd +genai.google.check = function(api, model, version, proxy) { + json.data = jsonlite::fromJSON("https://genai.gd.edu.kg/model.json") + if (is.na(match(model, json.data$google$model))) { + stop( + "Invalid value for model. Refer to 'available.models()' to view the supported models." + ) + } + if (is.na(match(version, json.data$google$version))) { + stop( + "Invalid value for version. Refer to 'available.models()' to view the supported versions." + ) + } + if (!proxy %in% c(TRUE, FALSE)) { + stop("Invalid value for proxy. It must be either TRUE or FALSE.") + } + + # Check connection + api.url = paste0( + "https://generativelanguage.googleapis.com/", + version, + "/models/", + model, + "?key=", + api + ) + if (proxy) { + api.url = paste0("https://api.genai.gd.edu.kg/google/", + version, + "/models/", + model, + "?key=", + api) + } + response = httr::GET(url = api.url, + httr::add_headers("Content-Type" = "application/json")) + responseJSON = httr::content(response, "parsed") + if (!is.null(responseJSON$error)) { + stop(responseJSON$error$message) + } + if (response$status_code != 200) { + stop( + "Invalid parameter(s) detected. Please check the values for api, model, version, and proxy." + ) + } +} + +#' @noRd +genai.google.config.check = function(config) { + if (!is.list(config)) { + stop("Invalid configuration. It must be a list.") + } + config.names = c( + "harm.category.dangerous.content", + "harm.category.harassment", + "harm.category.hate.speech", + "harm.category.sexually.explicit", + "stop.sequences", + "max.output.tokens", + "temperature", + "top.p", + "top.k" + ) + wrong.config = setdiff(names(config), config.names) + if (length(wrong.config) > 0) { + stop("Invalid configuration(s) detected: ", + paste0(wrong.config, collapse = ", ")) + } + if (length(unique(names(config))) != length(names(config))) { + stop("Invalid configurations. Duplicate parameters detected.") + } + + # Check harm categories + invalid.harm = lapply(config.names[1:4], function(harm) { + if (!is.null(config[[harm]]) && + is.na(match(config[[harm]], c(1, 2, 3, 4, 5)))) { + return(paste0("Invalid value for ", harm, ". It must be 1, 2, 3, 4, or 5.\n")) + } + }) + invalid.harm = Filter(Negate(is.null), invalid.harm) + if (length(invalid.harm) > 0) { + stop(invalid.harm) + } + + # Check stop sequence + if (!is.null(config[["stop.sequences"]]) && + !is.list(config[["stop.sequences"]])) { + stop("Invalid stop.sequences. It must be a list.") + } + if (length(config[["stop.sequences"]]) > 5) { + stop("Invalid value for stop.sequences. 
It can only have at most 5 strings.") + } +} + +#' @noRd +genai.google.formated.confguration = function(request.body, prompt) { + if (!is.null(request.body$safetySettings)) { + cat("=============================================================================\n") + cat(" Safety Settings\n") + cat("-----------------------------------------------------------------------------\n") + for (i in 1:length(request.body$safetySettings)) { + cat( + paste0(request.body$safetySettings[[i]]$category, ":"), + request.body$safetySettings[[i]]$threshold, + "\n" + ) + } + cat("=============================================================================\n\n\n\n") + } + if (!is.null(request.body$generationConfig)) { + cat("=============================================================================\n") + cat(" Generation Configuration\n") + cat("-----------------------------------------------------------------------------\n") + has.stop.sequences = FALSE + if (!is.null(request.body$generationConfig$stopSequences)) { + has.stop.sequences = TRUE + cat( + "stopSequences:", + paste0( + request.body$generationConfig$stopSequences, + collapse = ", " + ), + "\n" + ) + } + config.length = length(request.body$generationConfig) + config.names = names(request.body$generationConfig) + if (has.stop.sequences) { + if (config.length > 1) { + for (i in 2:config.length) { + cat(paste0(config.names[i], ":"), + request.body$generationConfig[[config.names[i]]], + "\n") + } + } + } + else { + for (i in 1:config.length) { + cat(paste0(config.names[i], ":"), + request.body$generationConfig[[config.names[i]]], + "\n") + } + } + cat("=============================================================================\n\n\n\n") + } + cat("=============================================================================\n") + cat(" Prompt\n") + cat("-----------------------------------------------------------------------------\n") + cat(paste(strwrap(prompt, width = 76, exdent = 0), collapse = "\n")) + cat("\n") + cat("=============================================================================\n\n\n\n") +} + +#' @noRd +genai.google.generation.config = function(config) { + configuration = list() + if (!is.null(config[["stop.sequences"]])) { + configuration$stopSequences = config[["stop.sequences"]] + } + if (!is.null(config[["max.output.tokens"]])) { + configuration$maxOutputTokens = config[["max.output.tokens"]] + } + if (!is.null(config[["temperature"]])) { + configuration$temperature = config[["temperature"]] + } + if (!is.null(config[["top.p"]])) { + configuration$topP = config[["top.p"]] + } + if (!is.null(config[["top.k"]])) { + configuration$topK = config[["top.k"]] + } + return(configuration) +} + +#' @noRd +genai.google.safety.setting = function(config) { + raw.harm.category = c( + harm.category.dangerous.content = "HARM_CATEGORY_DANGEROUS_CONTENT", + harm.category.harassment = "HARM_CATEGORY_HARASSMENT", + harm.category.hate.speech = "HARM_CATEGORY_HATE_SPEECH", + harm.category.sexually.explicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT" + ) + raw.harm.block.threshold = c( + "HARM_BLOCK_THRESHOLD_UNSPECIFIED", + "BLOCK_LOW_AND_ABOVE", + "BLOCK_MEDIUM_AND_ABOVE", + "BLOCK_ONLY_HIGH", + "BLOCK_NONE" + ) + filled.harm = + lapply(names(raw.harm.category), function(harm) { + if (!is.null(config[[harm]])) { + safety.setting.object = list("category" = raw.harm.category[harm], + "threshold" = raw.harm.block.threshold[config[[harm]]]) + return(safety.setting.object) + } else { + return(NULL) + } + }) + filled.harm = Filter(Negate(is.null), 
filled.harm)
+  return(filled.harm)
+}
diff --git a/R/src/R/genai.moonshot.R b/R/src/R/genai.moonshot.R
new file mode 100644
index 0000000..d232c63
--- /dev/null
+++ b/R/src/R/genai.moonshot.R
@@ -0,0 +1,62 @@
+#' Moonshot AI Object Creation
+#'
+#' This function establishes a connection to a Moonshot AI model by providing essential parameters.
+#'
+#' @param api A character string representing the API key required for accessing the model.
+#' @param model A character string representing the specific model.
+#' @param version A character string representing the version of the chosen model.
+#' @param proxy Optional. Defaults to \code{FALSE}. A boolean value indicating whether to use a
+#' proxy for accessing the API URL. If your local internet cannot access the API, set this
+#' parameter to \code{TRUE}.
+#'
+#' @return If successful, the function returns a moonshot object. If the API response
+#' indicates an error, the function halts execution and provides an error message.
+#'
+#' @details Providing accurate and valid information for each argument is crucial for successful text
+#' generation by the generative AI model. If any parameter is incorrect, the function responds with an
+#' error message based on the API feedback. To view all supported generative AI models, use the
+#' function \code{\link{available.models}}.
+#'
+#' Please refer to \code{https://platform.moonshot.cn/console/api-keys} for the API key.
+#'
+#' The API proxy service is designed to address the needs of users who hold a valid API key but find
+#' themselves outside their home countries or regions due to reasons such as travel, work, or study
+#' in locations that may not be covered by certain Generative AI service providers.
+#'
+#' Please be aware that although GenAI and its affiliated organization - GitData - do not gather user
+#' information through this service, the server providers for GenAI API proxy service and the Generative
+#' AI service providers may engage in such data collection. Furthermore, the proxy service cannot
+#' guarantee a consistent connection speed. Users are strongly encouraged to utilize this service
+#' with caution and at their own discretion. 
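To make the workflow documented above concrete, here is a minimal usage sketch. It assumes a valid key in the MOONSHOT_API environment variable; the model and version strings are illustrative placeholders only, so prefer the values reported by available.models():

# Minimal sketch. Assumptions: MOONSHOT_API holds a valid key, and
# "moonshot-v1-8k" / "v1" are placeholder model/version strings for
# illustration, not guaranteed values.
library(GenAI)
moonshot = genai.moonshot(api = Sys.getenv("MOONSHOT_API"),
                          model = "moonshot-v1-8k",
                          version = "v1",
                          proxy = FALSE)
moonshot$chat("Introduce yourself briefly.")  # one prompt; history grows by two messages
moonshot$chat.history.print()                 # numbered transcript, wrapped at 76 characters
moonshot$chat.history.save("kimi_chat")       # writes kimi_chat.json via jsonlite::toJSON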
+#' +#' @seealso +#' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} +#' +#' \href{https://genai.gd.edu.kg/api/}{GenAI - Generative Artificial Intelligence API Proxy Service} +#' +#' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/genai_moonshot.ipynb}{Live Demo in Colab} +#' +#' @examples +#' \dontrun{ +#' # Please change YOUR_MOONSHOT_API to your own API key of Moonshot AI +#' Sys.setenv(MOONSHOT_API = "YOUR_MOONSHOT_API") +#' +#' all.models = available.models() %>% print() +#' +#' # Create an moonshot object +#' moonshot = genai.moonshot(api = Sys.getenv("MOONSHOT_API"), +#' model = all.models$moonshot$model[1], +#' version = all.models$moonshot$version[1], +#' proxy = FALSE) +#' } +#' +#' @export +genai.moonshot = function(api, + model, + version, + proxy = FALSE) { + return (genai.moonshot.class$new(api, + model, + version, + proxy)) +} diff --git a/R/src/R/genai.moonshot.chat.R b/R/src/R/genai.moonshot.chat.R new file mode 100644 index 0000000..5e14547 --- /dev/null +++ b/R/src/R/genai.moonshot.chat.R @@ -0,0 +1,84 @@ +#' @noRd +genai.moonshot.chat = function(genai.moonshot.object, + prompt, + verbose, + config = list( + max.tokens = NULL, + temperature = NULL, + top.p = NULL + )) { + # Check configurations + genai.moonshot.config.check(config) + + # Get api url + api.url = paste0( + "https://api.moonshot.cn/", + genai.moonshot.object$version, + "/chat/completions" + ) + if (genai.moonshot.object$proxy) { + api.url = paste0( + "https://api.genai.gd.edu.kg/moonshot/", + genai.moonshot.object$version, + "/chat/completions" + ) + } + + # Initialize the request body + requestNewContent = list(list(role = "user", + content = prompt)) + requestBody = as.list(genai.moonshot.object$chat.history) + requestBody$messages = append(requestBody$messages, requestNewContent) + + # Get the generation configuration + if (length(config) > 0) { + requestBody = genai.moonshot.generation.config(requestBody, config) + } + + # Convert the request as JSON format + requestBodyJSON = jsonlite::toJSON(c(model = genai.moonshot.object$model, + requestBody), + auto_unbox = TRUE, + pretty = TRUE) + + # Send request and get response + response = httr::POST( + url = api.url, + body = requestBodyJSON, + httr::add_headers( + "Content-Type" = "application/json", + "Authorization" = paste("Bearer", genai.moonshot.object$api) + ) + ) + responseJSON = httr::content(response, "parsed") + + # Check for response error + if (!is.null(responseJSON$error)) { + stop(responseJSON$error$message) + } + + # Save the most recent prompt to the chat history + genai.moonshot.object$chat.history$messages = append(genai.moonshot.object$chat.history$messages, + requestNewContent) + + # Save the most recent model response to the chat history + respondContent = list(list( + role = "assistant", + content = responseJSON$choices[[1]]$message$content + )) + genai.moonshot.object$chat.history$messages = append(genai.moonshot.object$chat.history$messages, + respondContent) + + # Print detail if verbose is TRUE + if (verbose) { + genai.moonshot.formated.confguration(requestBody, prompt) + cat("=============================================================================\n") + cat(" Chat history \n") + cat("-----------------------------------------------------------------------------\n\n") + genai.moonshot.chat.history.print(genai.moonshot.object, from = 1, to = NULL) + cat("=============================================================================\n\n\n\n") + } + + # 
Get the response text
+  return (responseJSON$choices[[1]]$message$content)
+}
diff --git a/R/src/R/genai.moonshot.chat.edit.R b/R/src/R/genai.moonshot.chat.edit.R
new file mode 100644
index 0000000..df1ed8b
--- /dev/null
+++ b/R/src/R/genai.moonshot.chat.edit.R
@@ -0,0 +1,106 @@
+#' @noRd
+genai.moonshot.chat.edit = function(genai.moonshot.object,
+                                    prompt,
+                                    message.to.edit,
+                                    verbose,
+                                    config = list(
+                                      max.tokens = NULL,
+                                      temperature = NULL,
+                                      top.p = NULL
+                                    )) {
+  # Check if there are messages in the chat history
+  if (length(genai.moonshot.object$chat.history$messages) == 0) {
+    stop("Invalid chat.history. The chat history is empty.")
+  }
+
+  # Check message.to.edit with chat.history length
+  if (message.to.edit > length(genai.moonshot.object$chat.history$messages) ||
+      message.to.edit < 1) {
+    stop(
+      "Invalid value for message.to.edit. You can only edit existing messages. Please use 'chat.history.print()' to review the formatted chat history."
+    )
+  }
+
+  # Check message.to.edit (must be an even number)
+  if (message.to.edit %% 2 == 1) {
+    stop(
+      "Invalid value for message.to.edit. You can only edit messages sent by a user role. Please use 'chat.history.print()' to review the formatted chat history."
+    )
+  }
+
+  # Check configurations
+  genai.moonshot.config.check(config)
+
+  # Get api url
+  api.url = paste0(
+    "https://api.moonshot.cn/",
+    genai.moonshot.object$version,
+    "/chat/completions"
+  )
+  if (genai.moonshot.object$proxy) {
+    api.url = paste0(
+      "https://api.genai.gd.edu.kg/moonshot/",
+      genai.moonshot.object$version,
+      "/chat/completions"
+    )
+  }
+
+  # Initialize the request body
+  requestNewContent = list(list(role = "user",
+                                content = prompt))
+  requestBody = as.list(genai.moonshot.object$chat.history)
+  requestBody$messages = append(requestBody$messages[1:(message.to.edit - 1)],
+                                requestNewContent)
+
+  # Get the generation configuration
+  if (length(config) > 0) {
+    requestBody = genai.moonshot.generation.config(requestBody, config)
+  }
+
+  # Convert the request as JSON format
+  requestBodyJSON = jsonlite::toJSON(c(model = genai.moonshot.object$model,
+                                       requestBody),
+                                     auto_unbox = TRUE,
+                                     pretty = TRUE)
+
+  # Send request and get response
+  response = httr::POST(
+    url = api.url,
+    body = requestBodyJSON,
+    httr::add_headers(
+      "Content-Type" = "application/json",
+      "Authorization" = paste("Bearer", genai.moonshot.object$api)
+    )
+  )
+  responseJSON = httr::content(response, "parsed")
+
+  # Check for response error
+  if (!is.null(responseJSON$error)) {
+    stop(responseJSON$error$message)
+  }
+
+  # Save the most recent prompt to the chat history
+  genai.moonshot.object$chat.history$messages = append(genai.moonshot.object$chat.history$messages[1:(message.to.edit - 1)],
+                                                       requestNewContent)
+
+  # Save the most recent model response to the chat history
+  respondContent = list(list(
+    role = "assistant",
+    content = responseJSON$choices[[1]]$message$content
+  ))
+  genai.moonshot.object$chat.history$messages = append(genai.moonshot.object$chat.history$messages,
+                                                       respondContent)
+
+  # Print detail if verbose is TRUE
+  if (verbose) {
+    genai.moonshot.formated.confguration(requestBody, prompt)
+    cat("=============================================================================\n")
+    cat(" Chat history \n")
+    cat("-----------------------------------------------------------------------------\n\n")
+    genai.moonshot.chat.history.print(genai.moonshot.object, from = 1, to = NULL)
+    cat("=============================================================================\n\n\n\n")
+  }
+
+  # 
Get the response text + return (responseJSON$choices[[1]]$message$content) +} diff --git a/R/src/R/genai.moonshot.chat.history.convert.R b/R/src/R/genai.moonshot.chat.history.convert.R new file mode 100644 index 0000000..3416463 --- /dev/null +++ b/R/src/R/genai.moonshot.chat.history.convert.R @@ -0,0 +1,23 @@ +#' @noRd +genai.moonshot.chat.history.convert = function(from.genai.moonshot.object, + to.genai.object) { + if (class(to.genai.object)[1] == "genai.google") { + moonshot.history = from.genai.moonshot.object$chat.history$messages[2:length(from.genai.moonshot.object$chat.history$messages)] + contents = lapply(moonshot.history, function(entry) { + list( + role = ifelse(entry$role == "assistant", "model", "user"), + parts = list(text = entry$content) + ) + }) + google.history = contents + return(google.history) + } + else if (class(to.genai.object)[1] == "genai.openai") { + openai.history = from.genai.moonshot.object$chat.history$messages + openai.history[[1]]$content = "You are a helpful assistant." + return(openai.history) + } + else { + stop("Invalid value for to.genai.object.") + } +} diff --git a/R/src/R/genai.moonshot.chat.history.export.R b/R/src/R/genai.moonshot.chat.history.export.R new file mode 100644 index 0000000..d133233 --- /dev/null +++ b/R/src/R/genai.moonshot.chat.history.export.R @@ -0,0 +1,4 @@ +#' @noRd +genai.moonshot.chat.history.export = function(genai.moonshot.object) { + return (genai.moonshot.object$chat.history$messages) +} diff --git a/R/src/R/genai.moonshot.chat.history.import.R b/R/src/R/genai.moonshot.chat.history.import.R new file mode 100644 index 0000000..25cb52d --- /dev/null +++ b/R/src/R/genai.moonshot.chat.history.import.R @@ -0,0 +1,22 @@ +#' @noRd +genai.moonshot.chat.history.import = function(genai.moonshot.object, + new.chat.history) { + # Imported chat history is a list + if (is.list(new.chat.history)) { + expected.format = list( + role = NA, + content = NA + ) + for (message in new.chat.history) { + if (!identical(names(message), names(expected.format)) || + !is.character(message$role) || + !is.character(message$content)) { + stop("Invalid value for new.chat.history. Please make sure the format of the imported chat history is correct.") + } + } + genai.moonshot.object$chat.history$messages = new.chat.history + } + else { + stop("Invalid new.chat.history. Please make sure it is a list.") + } +} diff --git a/R/src/R/genai.moonshot.chat.history.print.R b/R/src/R/genai.moonshot.chat.history.print.R new file mode 100644 index 0000000..45cb91f --- /dev/null +++ b/R/src/R/genai.moonshot.chat.history.print.R @@ -0,0 +1,40 @@ +#' @noRd +genai.moonshot.chat.history.print = function(genai.moonshot.object, + from, + to) { + if (!is.numeric(from) || from < 1) { + stop("Invalid value for from. It should be an integer greater than or equal to 1.") + } + + if (is.numeric(to) && to < from) { + stop("Invalid value for to. It should be an integer greater than or equal to from") + } + + chat.length = length(genai.moonshot.object$chat.history$messages) + + if (is.numeric(to) && to > chat.length) { + stop("Invalid value for to. 
It should be an integer less than or equal to ", chat.length, ".") + } + + if (is.numeric(to)) { + chat.length = to + } + + if (chat.length > 0) { + for (i in from:chat.length) { + cat( + sprintf( + "-------------------------------- Message %2d ---------------------------------\n", + i + ) + ) + cat("Role:", + genai.moonshot.object$chat.history$messages[[i]]$role, + "\n") + cat("Text: ") + cat(paste(strwrap(genai.moonshot.object$chat.history$messages[[i]]$content, + width = 76, exdent = 0), collapse = "\n")) + cat("\n\n") + } + } +} diff --git a/R/src/R/genai.moonshot.chat.history.reset.R b/R/src/R/genai.moonshot.chat.history.reset.R new file mode 100644 index 0000000..1a13183 --- /dev/null +++ b/R/src/R/genai.moonshot.chat.history.reset.R @@ -0,0 +1,9 @@ +#' @noRd +genai.moonshot.chat.history.reset = function(genai.moonshot.object) { + genai.moonshot.object$chat.history$messages = list( + list( + role = "system", + content = "You are Kimi, an Artificial Intelligence Assistant powered by Moonshot AI, and you are better at conversations in Chinese and English. You will provide users with safe, helpful and accurate answers. At the same time, you will reject answers to questions about terrorism, racism, pornography, etc. Moonshot AI is a proper noun and cannot be translated into other languages." + ) + ) +} diff --git a/R/src/R/genai.moonshot.chat.history.save.R b/R/src/R/genai.moonshot.chat.history.save.R new file mode 100644 index 0000000..9078081 --- /dev/null +++ b/R/src/R/genai.moonshot.chat.history.save.R @@ -0,0 +1,6 @@ +#' @noRd +genai.moonshot.chat.history.save = function(genai.moonshot.object, + file.name) { + write(jsonlite::toJSON(genai.moonshot.object$chat.history$messages, pretty = TRUE), + paste0(file.name, ".json")) +} diff --git a/R/src/R/genai.moonshot.class.R b/R/src/R/genai.moonshot.class.R new file mode 100644 index 0000000..61092e6 --- /dev/null +++ b/R/src/R/genai.moonshot.class.R @@ -0,0 +1,81 @@ +#' @noRd +genai.moonshot.class = R6Class( + classname = "genai.moonshot", + public = list( + # Initialize method + initialize = function(api, model, version, proxy = FALSE) { + genai.moonshot.check(api, model, version, proxy) + private$api = api + private$model = model + private$version = version + private$proxy = proxy + }, + # Chat generation + chat = function(prompt, + verbose = FALSE, + config = list()) { + genai.moonshot.chat(private, + prompt, + verbose, + config) + }, + # Chat edit + chat.edit = function(prompt, + message.to.edit, + verbose = FALSE, + config = list()) { + genai.moonshot.chat.edit(private, + prompt, + message.to.edit, + verbose, + config) + }, + # Convert chat history + chat.history.convert = function(from.genai.object, to.genai.object) { + genai.moonshot.chat.history.convert(private, to.genai.object) + }, + # Export chat history + chat.history.export = function() { + genai.moonshot.chat.history.export(private) + }, + # Import chat history + chat.history.import = function(new.chat.history) { + genai.moonshot.chat.history.import(private, new.chat.history) + }, + # Print chat history + chat.history.print = function(from = 1, to = NULL) { + genai.moonshot.chat.history.print(private, from, to) + }, + # Reset chat history + chat.history.reset = function() { + genai.moonshot.chat.history.reset(private) + }, + # Save chat history + chat.history.save = function(file.name) { + genai.moonshot.chat.history.save(private, file.name) + }, + # Text generation + txt = function(prompt, + verbose = FALSE, + config = list()) { + genai.moonshot.txt(private, + prompt, + 
verbose, + config) + } + ), + private = list( + api = NULL, + model = NULL, + version = NULL, + proxy = FALSE, + chat.history = listenv::listenv( + messages = list( + list( + role = "system", + content = "You are Kimi, an Artificial Intelligence Assistant powered by Moonshot AI, and you are better at conversations in Chinese and English. You will provide users with safe, helpful and accurate answers. At the same time, you will reject answers to questions about terrorism, racism, pornography, etc. Moonshot AI is a proper noun and cannot be translated into other languages." + ) + ) + ) + ) +) diff --git a/R/src/R/genai.moonshot.txt.R b/R/src/R/genai.moonshot.txt.R new file mode 100644 index 0000000..9096302 --- /dev/null +++ b/R/src/R/genai.moonshot.txt.R @@ -0,0 +1,72 @@ +#' @noRd +genai.moonshot.txt = function(genai.moonshot.object, + prompt, + verbose, + config = list( + max.tokens = NULL, + temperature = NULL, + top.p = NULL + )) { + # Check configurations + genai.moonshot.config.check(config) + + # Get api url + api.url = paste0( + "https://api.moonshot.cn/", + genai.moonshot.object$version, + "/chat/completions" + ) + if (genai.moonshot.object$proxy) { + api.url = paste0( + "https://api.genai.gd.edu.kg/moonshot/", + genai.moonshot.object$version, + "/chat/completions" + ) + } + + # Initialize the request body + requestBody = list( + model = genai.moonshot.object$model, + messages = list( + list(role = "system", + content = "You are Kimi, an Artificial Intelligence Assistant powered by Moonshot AI, and you are better at conversations in Chinese and English. You will provide users with safe, helpful and accurate answers. At the same time, you will reject answers to questions about terrorism, racism, pornography, etc. Moonshot AI is a proper noun and cannot be translated into other languages."), + list(role = "user", + content = prompt) + ) + ) + + # Get the generation configuration + if (length(config) > 0) { + requestBody = genai.moonshot.generation.config(requestBody, config) + } + + # Convert the request as JSON format + requestBodyJSON = jsonlite::toJSON(requestBody, + auto_unbox = TRUE, + pretty = TRUE) + + # Send request and get response + response = httr::POST( + url = api.url, + body = requestBodyJSON, + httr::add_headers( + "Content-Type" = "application/json", + "Authorization" = paste("Bearer", genai.moonshot.object$api) + ) + ) + responseJSON = httr::content(response, "parsed") + + # Check for response error + if (!is.null(responseJSON$error)) { + stop(responseJSON$error$message) + } + + # Print detail if verbose is TRUE + if (verbose) { + genai.moonshot.formated.confguration(requestBody, prompt) + cat("\n") + } + + # Get the response text + return (responseJSON$choices[[1]]$message$content) +} diff --git a/R/src/R/genai.moonshot.utils.R b/R/src/R/genai.moonshot.utils.R new file mode 100644 index 0000000..b0fca3a --- /dev/null +++ b/R/src/R/genai.moonshot.utils.R @@ -0,0 +1,115 @@ +#' @noRd +genai.moonshot.check = function(api, model, version, proxy) { + json.data = jsonlite::fromJSON("https://genai.gd.edu.kg/model.json") + if (is.na(match(model, json.data$moonshot$model))) { + stop( + "Invalid value for model. Refer to 'available.models()' to view the supported models." + ) + } + if (is.na(match(version, json.data$moonshot$version))) { + stop( + "Invalid value for version. Refer to 'available.models()' to view the supported versions." + ) + } + if (!proxy %in% c(TRUE, FALSE)) { + stop("Invalid value for proxy. 
It must be either TRUE or FALSE.") + } + + # Check connection + api.url = paste0( + "https://api.moonshot.cn/", + version, + "/models" + ) + if (proxy) { + api.url = paste0( + "https://api.genai.gd.edu.kg/moonshot/", + version, + "/models") + } + response = httr::GET(url = api.url, + httr::add_headers( + "Content-Type" = "application/json", + "Authorization" = paste("Bearer", api) + ) + ) + responseJSON = httr::content(response, "parsed") + if (!is.null(responseJSON$error)) { + stop(responseJSON$error$message) + } + if (response$status_code != 200) { + stop( + "Invalid parameter(s) detected. Please check the values for api, model, version, and proxy." + ) + } +} + +#' @noRd +genai.moonshot.config.check = function(config) { + if (!is.list(config)) { + stop("Invalid configuration. It must be a list.") + } + config.names = c( + "max.tokens", + "temperature", + "top.p" + ) + wrong.config = setdiff(names(config), config.names) + if (length(wrong.config) > 0) { + stop("Invalid configuration(s) detected: ", + paste0(wrong.config, collapse = ", ")) + } + if (length(unique(names(config))) != length(names(config))) { + stop("Invalid configurations. Duplicate parameters detected.") + } +} + +#' @noRd +genai.moonshot.generation.config = function(requestBody, config) { + config.names = c( + max.tokens = "max_tokens", + temperature = "temperature", + top.p = "top_p" + ) + for (param_name in names(config)) { + if (!is.null(config[[param_name]])) { + requestBody[[config.names[param_name]]] = config[[param_name]] + } + } + return(requestBody) +} + +#' @noRd +genai.moonshot.formated.confguration = function(request.body, prompt) { + config.names = c( + max.tokens = "max_tokens", + temperature = "temperature", + top.p = "top_p" + ) + intersect.param = intersect(names(request.body), config.names) + if (length(intersect.param) > 0) { + cat("=============================================================================\n") + cat(" Generation Configuration\n") + cat("-----------------------------------------------------------------------------\n") + for (param in intersect.param) { + if (is.list(request.body[[param]])) { + cat("stop:", + paste0(request.body[[param]], + collapse = ", "), + "\n") + } + else { + cat(paste0(param, ":"), + request.body[[param]], + "\n") + } + } + cat("=============================================================================\n\n\n\n") + } + cat("=============================================================================\n") + cat(" Prompt\n") + cat("-----------------------------------------------------------------------------\n") + cat(paste(strwrap(prompt, width = 76, exdent = 0), collapse = "\n")) + cat("\n") + cat("=============================================================================\n\n\n\n") +} diff --git a/R/src/R/genai.openai.R b/R/src/R/genai.openai.R new file mode 100644 index 0000000..eae46ac --- /dev/null +++ b/R/src/R/genai.openai.R @@ -0,0 +1,71 @@ +#' OpenAI Object Creation +#' +#' This function establishes a connection to an OpenAI model by providing essential parameters. +#' +#' @param api A character string representing the API key required for accessing the model. +#' @param model A character string representing the specific model. +#' @param version A character string representing the version of the chosen model. +#' @param proxy Optional. Default to \code{FALSE}. A boolean value indicating whether to use a +#' proxy for accessing the API URL. If your local internet cannot access the API, set this +#' parameter to \code{TRUE}. 
+#' @param organization.id Optional. Defaults to \code{NULL}. A character string representing the
+#' organization ID.
+#'
+#' @return If successful, the function returns an OpenAI object. If the API response
+#' indicates an error, the function halts execution and provides an error message.
+#'
+#' @details Providing accurate and valid information for each argument is crucial for successful text
+#' generation by the generative AI model. If any parameter is incorrect, the function responds with an
+#' error message based on the API feedback. To view all supported generative AI models, use the
+#' function \code{\link{available.models}}.
+#'
+#' Please refer to \code{https://platform.openai.com/api-keys} for the API key. Moreover, please refer
+#' to \code{https://platform.openai.com/account/organization} for the optional organization ID.
+#'
+#' The API proxy service is designed to address the needs of users who hold a valid API key but find
+#' themselves outside their home countries or regions due to reasons such as travel, work, or study
+#' in locations that may not be covered by certain Generative AI service providers.
+#'
+#' Please be aware that although GenAI and its affiliated organization - GitData - do not gather user
+#' information through this service, the server providers for GenAI API proxy service and the Generative
+#' AI service providers may engage in such data collection. Furthermore, the proxy service cannot
+#' guarantee a consistent connection speed. Users are strongly encouraged to utilize this service
+#' with caution and at their own discretion.
+#'
+#' @seealso
+#' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
+#'
+#' \href{https://genai.gd.edu.kg/api/}{GenAI - Generative Artificial Intelligence API Proxy Service}
+#'
+#' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/genai_openai.ipynb}{Live Demo in Colab}
+#'
+#' @examples
+#' \dontrun{
+#' # Please change YOUR_OPENAI_API to your own API key of OpenAI
+#' Sys.setenv(OPENAI_API = "YOUR_OPENAI_API")
+#'
+#' # Optional. Please change YOUR_OPENAI_ORG to your own organization ID for OpenAI
+#' Sys.setenv(OPENAI_ORG = "YOUR_OPENAI_ORG")
+#'
+#' all.models = available.models() %>% print()
+#'
+#' # Create an OpenAI object
+#' openai = genai.openai(api = Sys.getenv("OPENAI_API"),
+#'                       model = all.models$openai$model[1],
+#'                       version = all.models$openai$version[1],
+#'                       proxy = FALSE,
+#'                       organization.id = Sys.getenv("OPENAI_ORG"))
+#' }
+#'
+#' @export
+genai.openai = function(api,
+                        model,
+                        version,
+                        proxy = FALSE,
+                        organization.id = NULL) {
+  return (genai.openai.class$new(api,
+                                 model,
+                                 version,
+                                 proxy,
+                                 organization.id))
+}
diff --git a/R/src/R/genai.openai.chat.R b/R/src/R/genai.openai.chat.R
new file mode 100644
index 0000000..77e5e30
--- /dev/null
+++ b/R/src/R/genai.openai.chat.R
@@ -0,0 +1,107 @@
+#' @noRd
+genai.openai.chat = function(genai.openai.object,
+                             prompt,
+                             verbose,
+                             config = list(
+                               frequency.penalty = NULL,
+                               logit.bias = NULL,
+                               logprobs = NULL,
+                               top.logprobs = NULL,
+                               max.tokens = NULL,
+                               presence.penalty = NULL,
+                               response.format = NULL,
+                               seed = NULL,
+                               stop = NULL,
+                               temperature = NULL,
+                               top.p = NULL,
+                               tools = NULL,
+                               tool.choice = NULL,
+                               user = NULL
+                             )) {
+  # Check configurations
+  genai.openai.config.check(config)
+
+  # Get api url
+  api.url = paste0(
+    "https://api.openai.com/",
+    genai.openai.object$version,
+    "/chat/completions"
+  )
+  if (genai.openai.object$proxy) {
+    api.url = paste0(
+      "https://api.genai.gd.edu.kg/openai/",
+      genai.openai.object$version,
+      "/chat/completions"
+    )
+  }
+
+  # Initialize the request body
+  requestNewContent = list(list(role = "user",
+                                content = prompt))
+  requestBody = as.list(genai.openai.object$chat.history)
+  requestBody$messages = append(requestBody$messages, requestNewContent)
+
+  # Get the generation configuration
+  if (length(config) > 0) {
+    requestBody = genai.openai.generation.config(requestBody, config)
+  }
+
+  # Convert the request as JSON format
+  requestBodyJSON = jsonlite::toJSON(c(model = genai.openai.object$model,
+                                       requestBody),
+                                     auto_unbox = TRUE,
+                                     pretty = TRUE)
+
+  # Send request and get response
+  response = httr::POST(
+    url = api.url,
+    body = requestBodyJSON,
+    httr::add_headers(
+      "Content-Type" = "application/json",
+      "Authorization" = paste("Bearer", genai.openai.object$api)
+    )
+  )
+  if (!is.null(genai.openai.object$organization.id) &&
+      is.character(genai.openai.object$organization.id)) {
+    response = httr::POST(
+      url = api.url,
+      body = requestBodyJSON,
+      httr::add_headers(
+        "Content-Type" = "application/json",
+        "Authorization" = paste("Bearer", genai.openai.object$api),
+        "OpenAI-Organization" = genai.openai.object$organization.id
+      )
+    )
+  }
+  responseJSON = httr::content(response, "parsed")
+
+  # Check for response error
+  if (!is.null(responseJSON$error)) {
+    stop(responseJSON$error$message)
+  }
+
+  # Save the most recent prompt to the chat history
+  genai.openai.object$chat.history$messages = append(genai.openai.object$chat.history$messages,
+                                                     requestNewContent)
+
+  # Save the most recent model response to the chat history
+  respondContent = list(list(
+    role = "assistant",
+    content = responseJSON$choices[[1]]$message$content
+  ))
+  genai.openai.object$chat.history$messages = append(genai.openai.object$chat.history$messages,
+                                                     respondContent)
+
+  # Print detail if verbose is TRUE
+  if (verbose) {
+    genai.openai.formated.confguration(requestBody, prompt)
+    cat("=============================================================================\n")
+    cat(" Chat history \n")
+    cat("-----------------------------------------------------------------------------\n\n")
+    genai.openai.chat.history.print(genai.openai.object, from = 1, to = NULL)
+    cat("=============================================================================\n\n\n\n")
+  }
+
+  # Get the response text
+  return (responseJSON$choices[[1]]$message$content)
+}
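The chat.edit path in the next file only accepts an even message.to.edit, because index 1 is the system prompt, so user turns sit at indices 2, 4, and so on. A hedged sketch of the intended flow, reusing the openai object created in the roxygen example above (live responses will naturally vary):

# Sketch only; `openai` is the object from the example above.
openai$chat("What is the capital of France?")  # history: 1 system, 2 user, 3 assistant
openai$chat.history.print()
openai$chat.edit("And what about Italy?",      # rewrites user message 2 and
                 message.to.edit = 2)          # regenerates the reply after it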
cat("-----------------------------------------------------------------------------\n\n") + genai.openai.chat.history.print(genai.openai.object, from = 1, to = NULL) + cat("=============================================================================\n\n\n\n") + } + + # Get the response text + return (responseJSON$choices[[1]]$message$content) +} diff --git a/R/src/R/genai.openai.chat.edit.R b/R/src/R/genai.openai.chat.edit.R new file mode 100644 index 0000000..f4ea8c4 --- /dev/null +++ b/R/src/R/genai.openai.chat.edit.R @@ -0,0 +1,129 @@ +#' @noRd +genai.openai.chat.edit = function(genai.openai.object, + prompt, + message.to.edit, + verbose, + config = list( + frequency.penalty = NULL, + logit.bias = NULL, + logprobs = NULL, + top.logprobs = NULL, + max.tokens = NULL, + presence.penalty = NULL, + response.format = NULL, + seed = NULL, + stop = NULL, + temperature = NULL, + top.p = NULL, + tools = NULL, + tool.choice = NULL, + user = NULL + )) { + # Check if there are messages in the chat history + if (length(genai.openai.object$chat.history$messages) == 0) { + stop("Invalid chat.history. The chat history is empty.") + } + + # Check message.to.edit with chat.history length + if (message.to.edit > length(genai.openai.object$chat.history$messages) || + message.to.edit < 1) { + stop( + "Invalid value for message.to.edit. You can only edit existing messages. Please use 'chat.history.print()' to review the formatted chat history." + ) + } + + # Check message.to.edit (must be a even number) + if (message.to.edit %% 2 == 1) { + stop( + "Invalid value for message.to.edit. You can only edit messages sent by a user role. Please use 'chat.history.print()' to review the formatted chat history." + ) + } + + # Check configurations + genai.openai.config.check(config) + + # Get api url + api.url = paste0( + "https://api.openai.com/", + genai.openai.object$version, + "/chat/completions" + ) + if (genai.openai.object$proxy) { + api.url = paste0( + "https://api.genai.gd.edu.kg/openai/", + genai.openai.object$version, + "/chat/completions" + ) + } + + # Initialize the request body + requestNewContent = list(list(role = "user", + content = prompt)) + requestBody = as.list(genai.openai.object$chat.history) + requestBody$messages = append(requestBody$messages[1:message.to.edit - 1], + requestNewContent) + + # Get the generation configuration + if (length(config) > 0) { + requestBody = genai.openai.generation.config(requestBody, config) + } + + # Convert the request as JSON format + requestBodyJSON = jsonlite::toJSON(c(model = genai.openai.object$model, + requestBody), + auto_unbox = TRUE, + pretty = TRUE) + + # Send request and get response + response = httr::POST( + url = api.url, + body = requestBodyJSON, + httr::add_headers( + "Content-Type" = "application/json", + "Authorization" = paste("Bearer", genai.openai.object$api) + ) + ) + if (!is.null(genai.openai.object$organization.id) && + is.character(genai.openai.object$organization.id)) { + response = httr::POST( + url = api.url, + body = requestBodyJSON, + httr::add_headers( + "Content-Type" = "application/json", + "Authorization" = paste("Bearer", genai.openai.object$api), + "OpenAI-Organization" = genai.openai.object$organization.id + ) + ) + } + responseJSON = httr::content(response, "parsed") + + # Check for response error + if (!is.null(responseJSON$error)) { + stop(responseJSON$error$message) + } + + # Save the most recent prompt to the chat history + genai.openai.object$chat.history$messages = 
+  genai.openai.object$chat.history$messages = append(genai.openai.object$chat.history$messages[1:(message.to.edit - 1)],
+                                                     requestNewContent)
+
+  # Save the most recent model response to the chat history
+  respondContent = list(list(
+    role = "assistant",
+    content = responseJSON$choices[[1]]$message$content
+  ))
+  genai.openai.object$chat.history$messages = append(genai.openai.object$chat.history$messages,
+                                                     respondContent)
+
+  # Print detail if verbose is TRUE
+  if (verbose) {
+    genai.openai.formated.confguration(requestBody, prompt)
+    cat("=============================================================================\n")
+    cat(" Chat history \n")
+    cat("-----------------------------------------------------------------------------\n\n")
+    genai.openai.chat.history.print(genai.openai.object, from = 1, to = NULL)
+    cat("=============================================================================\n\n\n\n")
+  }
+
+  # Get the response text
+  return (responseJSON$choices[[1]]$message$content)
+}
diff --git a/R/src/R/genai.openai.chat.history.convert.R b/R/src/R/genai.openai.chat.history.convert.R
new file mode 100644
index 0000000..75c423a
--- /dev/null
+++ b/R/src/R/genai.openai.chat.history.convert.R
@@ -0,0 +1,23 @@
+#' @noRd
+genai.openai.chat.history.convert = function(from.genai.openai.object,
+                                             to.genai.object) {
+  if (class(to.genai.object)[1] == "genai.google") {
+    openai.history = from.genai.openai.object$chat.history$messages[2:length(from.genai.openai.object$chat.history$messages)]
+    contents = lapply(openai.history, function(entry) {
+      list(
+        role = ifelse(entry$role == "assistant", "model", "user"),
+        parts = list(text = entry$content)
+      )
+    })
+    google.history = contents
+    return(google.history)
+  }
+  else if (class(to.genai.object)[1] == "genai.moonshot") {
+    moonshot.history = from.genai.openai.object$chat.history$messages
+    moonshot.history[[1]]$content = "You are Kimi, an Artificial Intelligence Assistant powered by Moonshot AI, and you are better at conversations in Chinese and English. You will provide users with safe, helpful and accurate answers. At the same time, you will reject answers to questions about terrorism, racism, pornography, etc. Moonshot AI is a proper noun and cannot be translated into other languages."
+    return(moonshot.history)
+  }
+  else {
+    stop("Invalid value for to.genai.object.")
+  }
+}
diff --git a/R/src/R/genai.openai.chat.history.export.R b/R/src/R/genai.openai.chat.history.export.R
new file mode 100644
index 0000000..55799d1
--- /dev/null
+++ b/R/src/R/genai.openai.chat.history.export.R
@@ -0,0 +1,4 @@
+#' @noRd
+genai.openai.chat.history.export = function(genai.openai.object) {
+  return (genai.openai.object$chat.history$messages)
+}
diff --git a/R/src/R/genai.openai.chat.history.import.R b/R/src/R/genai.openai.chat.history.import.R
new file mode 100644
index 0000000..2e80a1f
--- /dev/null
+++ b/R/src/R/genai.openai.chat.history.import.R
@@ -0,0 +1,22 @@
+#' @noRd
+genai.openai.chat.history.import = function(genai.openai.object,
+                                            new.chat.history) {
+  # Imported chat history is a list
+  if (is.list(new.chat.history)) {
+    expected.format = list(
+      role = NA,
+      content = NA
+    )
+    for (message in new.chat.history) {
+      if (!identical(names(message), names(expected.format)) ||
+          !is.character(message$role) ||
+          !is.character(message$content)) {
+        stop("Invalid value for new.chat.history. Please make sure the format of the imported chat history is correct.")
+      }
+    }
+    genai.openai.object$chat.history$messages = new.chat.history
+  }
+  else {
+    stop("Invalid new.chat.history. Please make sure it is a list.")
+  }
+}
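Because export and import both operate on plain R lists of role/content pairs, a conversation can be handed across providers through chat.history.convert. A hedged sketch, assuming the openai and moonshot objects from the earlier examples:

# Sketch only; `openai` and `moonshot` are the objects created in the
# examples above. Conversion rewrites the system prompt for the target.
exported  = openai$chat.history.export()                   # list of role/content messages
converted = openai$chat.history.convert(openai, moonshot)  # moonshot-format history
moonshot$chat.history.import(converted)
moonshot$chat.history.print()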
diff --git a/R/src/R/genai.openai.chat.history.print.R b/R/src/R/genai.openai.chat.history.print.R
new file mode 100644
index 0000000..5279eb4
--- /dev/null
+++ b/R/src/R/genai.openai.chat.history.print.R
@@ -0,0 +1,40 @@
+#' @noRd
+genai.openai.chat.history.print = function(genai.openai.object,
+                                           from,
+                                           to) {
+  if (!is.numeric(from) || from < 1) {
+    stop("Invalid value for from. It should be an integer greater than or equal to 1.")
+  }
+
+  if (is.numeric(to) && to < from) {
+    stop("Invalid value for to. It should be an integer greater than or equal to from.")
+  }
+
+  chat.length = length(genai.openai.object$chat.history$messages)
+
+  if (is.numeric(to) && to > chat.length) {
+    stop("Invalid value for to. It should be an integer less than or equal to ", chat.length, ".")
+  }
+
+  if (is.numeric(to)) {
+    chat.length = to
+  }
+
+  if (chat.length > 0) {
+    for (i in from:chat.length) {
+      cat(
+        sprintf(
+          "-------------------------------- Message %2d ---------------------------------\n",
+          i
+        )
+      )
+      cat("Role:",
+          genai.openai.object$chat.history$messages[[i]]$role,
+          "\n")
+      cat("Text: ")
+      cat(paste(strwrap(genai.openai.object$chat.history$messages[[i]]$content,
+                        width = 76, exdent = 0), collapse = "\n"))
+      cat("\n\n")
+    }
+  }
+}
diff --git a/R/src/R/genai.openai.chat.history.reset.R b/R/src/R/genai.openai.chat.history.reset.R
new file mode 100644
index 0000000..e71a819
--- /dev/null
+++ b/R/src/R/genai.openai.chat.history.reset.R
@@ -0,0 +1,9 @@
+#' @noRd
+genai.openai.chat.history.reset = function(genai.openai.object) {
+  genai.openai.object$chat.history$messages = list(
+    list(
+      role = "system",
+      content = "You are a helpful assistant."
+    )
+  )
+}
diff --git a/R/src/R/genai.openai.chat.history.save.R b/R/src/R/genai.openai.chat.history.save.R
new file mode 100644
index 0000000..89337dc
--- /dev/null
+++ b/R/src/R/genai.openai.chat.history.save.R
@@ -0,0 +1,6 @@
+#' @noRd
+genai.openai.chat.history.save = function(genai.openai.object,
+                                          file.name) {
+  write(jsonlite::toJSON(genai.openai.object$chat.history$messages, pretty = TRUE),
+        paste0(file.name, ".json"))
+}
diff --git a/R/src/R/genai.openai.class.R b/R/src/R/genai.openai.class.R
new file mode 100644
index 0000000..de40919
--- /dev/null
+++ b/R/src/R/genai.openai.class.R
@@ -0,0 +1,105 @@
+#' @noRd
+genai.openai.class = R6Class(
+  classname = "genai.openai",
+  public = list(
+    # Initialize method
+    initialize = function(api, model, version, proxy = FALSE, organization.id = NULL) {
+      genai.openai.check(api, model, version, proxy, organization.id)
+      private$api = api
+      private$model = model
+      private$version = version
+      private$proxy = proxy
+      if (!is.null(organization.id) && is.character(organization.id)) {
+        private$organization.id = organization.id
+      }
+    },
+    # Chat generation
+    chat = function(prompt,
+                    verbose = FALSE,
+                    config = list()) {
+      genai.openai.chat(private,
+                        prompt,
+                        verbose,
+                        config)
+    },
+    # Chat edit
+    chat.edit = function(prompt,
+                         message.to.edit,
+                         verbose = FALSE,
+                         config = list()) {
+      genai.openai.chat.edit(private,
+                             prompt,
+                             message.to.edit,
+                             verbose,
+                             config)
+    },
+    # Convert chat history
+    chat.history.convert = function(from.genai.object, to.genai.object) {
+      genai.openai.chat.history.convert(private, to.genai.object)
+    },
+    # Export chat history
+    chat.history.export = function() {
+      genai.openai.chat.history.export(private)
+    },
+    # Import chat history
+    chat.history.import = function(new.chat.history) {
+      genai.openai.chat.history.import(private, 
new.chat.history) + }, + # Print chat history + chat.history.print = function(from = 1, to = NULL) { + genai.openai.chat.history.print(private, from, to) + }, + # Reset chat history + chat.history.reset = function() { + genai.openai.chat.history.reset(private) + }, + # Save chat history + chat.history.save = function(file.name) { + genai.openai.chat.history.save(private, file.name) + }, + # Image generation + img = function(prompt, + verbose = FALSE, + config = list()) { + genai.openai.img(private, + prompt, + verbose, + config) + }, + # Text generation + txt = function(prompt, + verbose = FALSE, + config = list()) { + genai.openai.txt(private, + prompt, + verbose, + config) + }, + # Text generation with image as input + txt.image = function(prompt, + image.path, + verbose = FALSE, + config = list()) { + genai.openai.txt.image(private, + prompt, + image.path, + verbose, + config) + } + ), + private = list( + api = NULL, + organization.id = NULL, + model = NULL, + version = NULL, + proxy = FALSE, + chat.history = listenv::listenv( + messages = list( + list( + role = "system", + content = "You are a helpful assistant." + ) + ) + ) + ) +) diff --git a/R/src/R/genai.openai.img.R b/R/src/R/genai.openai.img.R new file mode 100644 index 0000000..0831760 --- /dev/null +++ b/R/src/R/genai.openai.img.R @@ -0,0 +1,87 @@ +#' @noRd +genai.openai.img = function(genai.openai.object, + prompt, + verbose, + config = list( + quality = NULL, + size = NULL, + style = NULL, + user = NULL + )) { + # Check configurations + genai.openai.img.config.check(config) + + # Get api url + api.url = paste0( + "https://api.openai.com/", + genai.openai.object$version, + "/images/generations" + ) + if (genai.openai.object$proxy) { + api.url = paste0( + "https://api.genai.gd.edu.kg/openai/", + genai.openai.object$version, + "/images/generations" + ) + } + + # Initialize the request body + requestBody = list( + model = genai.openai.object$model, + prompt = prompt, + response_format = "b64_json" + ) + + # Get the generation configuration + if (length(config) > 0) { + requestBody = genai.openai.img.generation.config(requestBody, config) + } + + # Convert the request as JSON format + requestBodyJSON = jsonlite::toJSON(requestBody, + auto_unbox = TRUE, + pretty = TRUE) + + # Send request and get response + response = httr::POST( + url = api.url, + body = requestBodyJSON, + httr::add_headers( + "Content-Type" = "application/json", + "Authorization" = paste("Bearer", genai.openai.object$api) + ) + ) + if (!is.null(genai.openai.object$organization.id) && + is.character(genai.openai.object$organization.id)) { + response = httr::POST( + url = api.url, + body = requestBodyJSON, + httr::add_headers( + "Content-Type" = "application/json", + "Authorization" = paste("Bearer", genai.openai.object$api), + "OpenAI-Organization" = genai.openai.object$organization.id + ) + ) + } + responseJSON = httr::content(response, "parsed") + + # Check for response error + if (!is.null(responseJSON$error)) { + stop(responseJSON$error$message) + } + + # Print detail if verbose is TRUE + if (verbose) { + genai.openai.img.formated.confguration(requestBody, prompt) + cat("\n") + } + + # Store the image + image.data = base64enc::base64decode(responseJSON$data[[1]]$b64_json[1]) + tmp.img = tempfile(fileext = ".png") + writeBin(image.data, tmp.img) + export.img = ggplotify::as.ggplot(magick::image_read(tmp.img)) + + # Return the image + return (export.img) +} diff --git a/R/src/R/genai.openai.txt.R b/R/src/R/genai.openai.txt.R new file mode 100644 index 
index 0000000..ba69fbf
--- /dev/null
+++ b/R/src/R/genai.openai.txt.R
@@ -0,0 +1,95 @@
+#' @noRd
+genai.openai.txt = function(genai.openai.object,
+                            prompt,
+                            verbose,
+                            config = list(
+                              frequency.penalty = NULL,
+                              logit.bias = NULL,
+                              logprobs = NULL,
+                              top.logprobs = NULL,
+                              max.tokens = NULL,
+                              presence.penalty = NULL,
+                              response.format = NULL,
+                              seed = NULL,
+                              stop = NULL,
+                              temperature = NULL,
+                              top.p = NULL,
+                              tools = NULL,
+                              tool.choice = NULL,
+                              user = NULL
+                            )) {
+  # Check configurations
+  genai.openai.config.check(config)
+
+  # Get api url
+  api.url = paste0(
+    "https://api.openai.com/",
+    genai.openai.object$version,
+    "/chat/completions"
+  )
+  if (genai.openai.object$proxy) {
+    api.url = paste0(
+      "https://api.genai.gd.edu.kg/openai/",
+      genai.openai.object$version,
+      "/chat/completions"
+    )
+  }
+
+  # Initialize the request body
+  requestBody = list(
+    model = genai.openai.object$model,
+    messages = list(
+      list(role = "system",
+           content = "You are a helpful assistant."),
+      list(role = "user",
+           content = prompt)
+    )
+  )
+
+  # Get the generation configuration
+  if (length(config) > 0) {
+    requestBody = genai.openai.generation.config(requestBody, config)
+  }
+
+  # Convert the request body to JSON format
+  requestBodyJSON = jsonlite::toJSON(requestBody,
+                                     auto_unbox = TRUE,
+                                     pretty = TRUE)
+
+  # Build the request headers, adding the organization ID only when provided,
+  # so the request is sent exactly once
+  request.headers = c(
+    "Content-Type" = "application/json",
+    "Authorization" = paste("Bearer", genai.openai.object$api)
+  )
+  if (!is.null(genai.openai.object$organization.id) &&
+      is.character(genai.openai.object$organization.id)) {
+    request.headers = c(request.headers,
+                        "OpenAI-Organization" = genai.openai.object$organization.id)
+  }
+
+  # Send request and get response
+  response = httr::POST(
+    url = api.url,
+    body = requestBodyJSON,
+    httr::add_headers(.headers = request.headers)
+  )
+  responseJSON = httr::content(response, "parsed")
+
+  # Check for response error
+  if (!is.null(responseJSON$error)) {
+    stop(responseJSON$error$message)
+  }
+
+  # Print detail if verbose is TRUE
+  if (verbose) {
+    genai.openai.formated.confguration(requestBody, prompt)
+    cat("\n")
+  }
+
+  # Get the response text
+  return(responseJSON$choices[[1]]$message$content)
+}
diff --git a/R/src/R/genai.openai.txt.image.R b/R/src/R/genai.openai.txt.image.R
new file mode 100644
index 0000000..c1b1087
--- /dev/null
+++ b/R/src/R/genai.openai.txt.image.R
@@ -0,0 +1,113 @@
+#' @noRd
+genai.openai.txt.image = function(genai.openai.object,
+                                  prompt,
+                                  image.path,
+                                  verbose,
+                                  config = list(
+                                    frequency.penalty = NULL,
+                                    logit.bias = NULL,
+                                    logprobs = NULL,
+                                    top.logprobs = NULL,
+                                    max.tokens = NULL,
+                                    presence.penalty = NULL,
+                                    response.format = NULL,
+                                    seed = NULL,
+                                    stop = NULL,
+                                    temperature = NULL,
+                                    top.p = NULL,
+                                    tools = NULL,
+                                    tool.choice = NULL,
+                                    user = NULL
+                                  )) {
+  # Check configurations
+  genai.openai.config.check(config)
+
+  # Get api url
+  api.url = paste0(
+    "https://api.openai.com/",
+    genai.openai.object$version,
+    "/chat/completions"
+  )
+  if (genai.openai.object$proxy) {
+    api.url = paste0(
+      "https://api.genai.gd.edu.kg/openai/",
+      genai.openai.object$version,
+      "/chat/completions"
+    )
+  }
+
+  # Encode the image as base64 (returns the file extension and encoded data)
+  img.info = image.to.data.uri(image.path)
+
+  # Initialize the request body
+  requestBody = list(model = genai.openai.object$model,
+                     messages = list(
+                       list(role = "system",
+                            content = "You are a helpful assistant."),
+                       list(role = "user",
+                            content = list(
+                              list(type = "text",
+                                   text = prompt),
+                              list(type = "image_url",
+                                   image_url = list(
+                                     url = paste0("data:image/",
+                                                  img.info[1],
+                                                  ";base64,",
+                                                  img.info[2])
+                                   ))
+                            ))
+                     ))
+
+  # Get the generation configuration
+  if (length(config) > 0) {
+    requestBody = genai.openai.generation.config(requestBody, config)
+  }
+
+  # Convert the request body to JSON format
+  requestBodyJSON = jsonlite::toJSON(requestBody,
+                                     auto_unbox = TRUE,
+                                     pretty = TRUE)
+
+  # Build the request headers, adding the organization ID only when provided,
+  # so the request is sent exactly once
+  request.headers = c(
+    "Content-Type" = "application/json",
+    "Authorization" = paste("Bearer", genai.openai.object$api)
+  )
+  if (!is.null(genai.openai.object$organization.id) &&
+      is.character(genai.openai.object$organization.id)) {
+    request.headers = c(request.headers,
+                        "OpenAI-Organization" = genai.openai.object$organization.id)
+  }
+
+  # Send request and get response
+  response = httr::POST(
+    url = api.url,
+    body = requestBodyJSON,
+    httr::add_headers(.headers = request.headers)
+  )
+  responseJSON = httr::content(response, "parsed")
+
+  # Check for response error
+  if (!is.null(responseJSON$error)) {
+    stop(responseJSON$error$message)
+  }
+
+  # Print detail if verbose is TRUE
+  if (verbose) {
+    genai.openai.formated.confguration(requestBody, prompt)
+    cat("=============================================================================\n")
+    cat("                                 Image Path\n")
+    cat("-----------------------------------------------------------------------------\n")
+    cat(paste(strwrap(image.path, width = 76, exdent = 0), collapse = "\n"))
+    cat("\n")
+    cat("=============================================================================\n\n\n\n")
+    cat("\n")
+  }
+
+  # Get the response text
+  return(responseJSON$choices[[1]]$message$content)
+}
diff --git a/R/src/R/genai.openai.utils.R b/R/src/R/genai.openai.utils.R
new file mode 100644
index 0000000..d22dcfa
--- /dev/null
+++ b/R/src/R/genai.openai.utils.R
@@ -0,0 +1,233 @@
+#' @noRd
+genai.openai.check = function(api, model, version, proxy, organization.id) {
+  json.data = jsonlite::fromJSON("https://genai.gd.edu.kg/model.json")
+  if (is.na(match(model, json.data$openai$model))) {
+    stop(
+      "Invalid value for model. Refer to 'available.models()' to view the supported models."
+    )
+  }
+  if (is.na(match(version, json.data$openai$version))) {
+    stop(
+      "Invalid value for version. Refer to 'available.models()' to view the supported versions."
+    )
+  }
+  if (!proxy %in% c(TRUE, FALSE)) {
+    stop("Invalid value for proxy. It must be either TRUE or FALSE.")
+  }
+  if (!is.null(organization.id) && !is.character(organization.id)) {
+    stop("Invalid value for organization.id. It must be either NULL (by default) or a character string.")
+  }
+
+  # Check connection
+  api.url = paste0(
+    "https://api.openai.com/",
+    version,
+    "/models"
+  )
+  if (proxy) {
+    api.url = paste0(
+      "https://api.genai.gd.edu.kg/openai/",
+      version,
+      "/models")
+  }
+  # Build the request headers, adding the organization ID only when provided,
+  # so the request is sent exactly once
+  request.headers = c(
+    "Content-Type" = "application/json",
+    "Authorization" = paste("Bearer", api)
+  )
+  if (!is.null(organization.id) && is.character(organization.id)) {
+    request.headers = c(request.headers,
+                        "OpenAI-Organization" = organization.id)
+  }
+  response = httr::GET(url = api.url,
+                       httr::add_headers(.headers = request.headers))
+  responseJSON = httr::content(response, "parsed")
+  if (!is.null(responseJSON$error)) {
+    stop(responseJSON$error$message)
+  }
+  if (response$status_code != 200) {
+    stop(
+      "Invalid parameter(s) detected. Please check the values for api, model, version, and proxy."
+    )
+  }
+}
+
+#' @noRd
+genai.openai.config.check = function(config) {
+  if (!is.list(config)) {
+    stop("Invalid configuration. It must be a list.")
+  }
+  config.names = c(
+    "frequency.penalty",
+    "logit.bias",
+    "logprobs",
+    "top.logprobs",
+    "max.tokens",
+    "presence.penalty",
+    "response.format",
+    "seed",
+    "stop",
+    "temperature",
+    "top.p",
+    "tools",
+    "tool.choice",
+    "user"
+  )
+  wrong.config = setdiff(names(config), config.names)
+  if (length(wrong.config) > 0) {
+    stop("Invalid configuration(s) detected: ",
+         paste0(wrong.config, collapse = ", "))
+  }
+  if (length(unique(names(config))) != length(names(config))) {
+    stop("Invalid configurations. Duplicate parameters detected.")
+  }
+}
+
+#' @noRd
+genai.openai.img.config.check = function(config) {
+  if (!is.list(config)) {
+    stop("Invalid configuration. It must be a list.")
+  }
+  config.names = c(
+    "quality",
+    "size",
+    "style",
+    "user"
+  )
+  wrong.config = setdiff(names(config), config.names)
+  if (length(wrong.config) > 0) {
+    stop("Invalid configuration(s) detected: ",
+         paste0(wrong.config, collapse = ", "))
+  }
+  if (length(unique(names(config))) != length(names(config))) {
+    stop("Invalid configurations. Duplicate parameters detected.")
+  }
+}
+
+#' @noRd
+genai.openai.generation.config = function(requestBody, config) {
+  # Map the user-facing parameter names to the names the OpenAI API expects
+  config.names = c(
+    frequency.penalty = "frequency_penalty",
+    logit.bias = "logit_bias",
+    logprobs = "logprobs",
+    top.logprobs = "top_logprobs",
+    max.tokens = "max_tokens",
+    presence.penalty = "presence_penalty",
+    response.format = "response_format",
+    seed = "seed",
+    stop = "stop",
+    temperature = "temperature",
+    top.p = "top_p",
+    tools = "tools",
+    tool.choice = "tool_choice",
+    user = "user"
+  )
+  for (param_name in names(config)) {
+    if (!is.null(config[[param_name]])) {
+      requestBody[[config.names[param_name]]] = config[[param_name]]
+    }
+  }
+  return(requestBody)
+}
+
+#' @noRd
+genai.openai.img.generation.config = function(requestBody, config) {
+  config.names = c(
+    quality = "quality",
+    size = "size",
+    style = "style",
+    user = "user"
+  )
+  for (param_name in names(config)) {
+    if (!is.null(config[[param_name]])) {
+      requestBody[[config.names[param_name]]] = config[[param_name]]
+    }
+  }
+  return(requestBody)
+}
+
+#' @noRd
+genai.openai.formated.confguration = function(request.body, prompt) {
+  config.names = c(
+    frequency.penalty = "frequency_penalty",
+    logit.bias = "logit_bias",
+    logprobs = "logprobs",
+    top.logprobs = "top_logprobs",
+    max.tokens = "max_tokens",
+    presence.penalty = "presence_penalty",
+    response.format = "response_format",
+    seed = "seed",
+    stop = "stop",
+    temperature = "temperature",
+    top.p = "top_p",
+    tools = "tools",
+    tool.choice = "tool_choice",
+    user = "user"
+  )
+  intersect.param = intersect(names(request.body), config.names)
+  if (length(intersect.param) > 0) {
+    cat("=============================================================================\n")
+    cat("                          Generation Configuration\n")
+    cat("-----------------------------------------------------------------------------\n")
+    for (param in intersect.param) {
+      if (is.list(request.body[[param]])) {
+        # Collapse list-valued parameters (e.g. stop sequences) into one line,
+        # labeled with the actual parameter name
+        cat(paste0(param, ":"),
+            paste0(request.body[[param]],
+                   collapse = ", "),
+            "\n")
+      }
+      else {
+        cat(paste0(param, ":"),
+            request.body[[param]],
+            "\n")
+      }
+    }
+    cat("=============================================================================\n\n\n\n")
+  }
+  cat("=============================================================================\n")
+  cat("                                   Prompt\n")
+  cat("-----------------------------------------------------------------------------\n")
+  cat(paste(strwrap(prompt, width = 76, exdent = 0), collapse = "\n"))
+  cat("\n")
+  cat("=============================================================================\n\n\n\n")
+}
+
+#' @noRd
+genai.openai.img.formated.confguration = function(request.body, prompt) {
+  config.names = c(
+    quality = "quality",
+    size = "size",
+    style = "style",
+    user = "user"
+  )
+  intersect.param = intersect(names(request.body), config.names)
+  if (length(intersect.param) > 0) {
+    cat("=============================================================================\n")
+    cat("                          Generation Configuration\n")
+    cat("-----------------------------------------------------------------------------\n")
+    for (param in intersect.param) {
+      if (is.list(request.body[[param]])) {
+        # Collapse list-valued parameters into one line, labeled with the
+        # actual parameter name
+        cat(paste0(param, ":"),
+            paste0(request.body[[param]],
+                   collapse = ", "),
+            "\n")
+      }
+      else {
+        cat(paste0(param, ":"),
+            request.body[[param]],
+            "\n")
+      }
+    }
+    cat("=============================================================================\n\n\n\n")
+  }
+  cat("=============================================================================\n")
+  cat("                                   Prompt\n")
+  cat("-----------------------------------------------------------------------------\n")
+  cat(paste(strwrap(prompt, width = 76, exdent = 0), collapse = "\n"))
+  cat("\n")
+  cat("=============================================================================\n\n\n\n")
+}
diff --git a/R/src/R/genai.utils.R b/R/src/R/genai.utils.R
new file mode 100644
index 0000000..7fae9a4
--- /dev/null
+++ b/R/src/R/genai.utils.R
@@ -0,0 +1,11 @@
+#' @noRd
+image.to.data.uri = function(image.path) {
+  image.data = ""
+  if (grepl("^https?://", tolower(image.path))) {
+    # Remote image: download it and base64-encode the raw bytes
+    response = httr::GET(image.path)
+    image.data = base64enc::base64encode(httr::content(response, type = "raw"))
+  } else {
+    # Local image: read the file and base64-encode it
+    image.data = base64enc::base64encode(readBin(image.path, "raw", file.info(image.path)$size))
+  }
+  # Return the file extension together with the encoded data
+  return(c(tools::file_ext(image.path), image.data))
+}
diff --git a/R/src/R/img.R b/R/src/R/img.R
new file mode 100644
index 0000000..8fb9dc7
--- /dev/null
+++ b/R/src/R/img.R
@@ -0,0 +1,83 @@
+#' Image Generation with Text as the Input
+#'
+#' This function establishes a connection to a generative AI model through a generative AI object.
+#' It generates an image response based on the provided prompt.
+#'
+#' @param genai.object A generative AI object containing necessary and correct information.
+#' @param prompt A character string representing the query for image generation.
+#' @param verbose Optional. Default to \code{FALSE}. A boolean value determining whether or not to print
+#' out the details of the image request.
+#' @param config Optional. Default to \code{list()}. A list of configuration parameters for image generation.
+#'
+#' @return If successful, an image in \code{ggplot} format will be returned. If the API response indicates
+#' an error, the function halts execution and provides an error message.
+#'
+#' @details Providing accurate and valid information for each argument is crucial for successful image
+#' generation by the generative AI model. If any parameter is incorrect, the function responds with an
+#' error message based on the API feedback. To view all supported generative AI models, use the
+#' function \code{\link{available.models}}.
+#'
+#' This function is only available when using OpenAI's models.
+#'
+#' For \strong{OpenAI} models, available configurations are as follows. For more detail, please refer to
+#' \code{https://platform.openai.com/docs/api-reference/images/create}.
+#'
+#' \itemize{
+#' \item \code{quality}
+#'
+#' Optional. A character string. The quality of the image that will be generated. \code{hd} creates
+#' images with finer details and greater consistency across the image.
+#'
+#' \item \code{size}
+#'
+#' Optional. A character string. The size of the generated images. Must be one of \code{256x256},
+#' \code{512x512}, or \code{1024x1024} for \code{dall-e-2}. Must be one of \code{1024x1024}, \code{1792x1024}, or
+#' \code{1024x1792} for \code{dall-e-3} models.
+#'
+#' \item \code{style}
+#'
+#' Optional. The style of the generated images. Must be one of \code{vivid} or \code{natural}. Vivid causes
+#' the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce
+#' more natural, less hyper-real looking images.
+#'
+#' \item \code{user}
+#'
+#' Optional. A character string. A unique identifier representing your end-user, which can help OpenAI to monitor
+#' and detect abuse.
+#' }
+#'
+#' @seealso
+#' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
+#'
+#' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/img.ipynb}{Live Demo in Colab}
+#'
+#' @examples
+#' \dontrun{
+#' # Assuming there is a GenAI object named 'genai.model' supporting this
+#' # function, please refer to the "Live Demo in Colab" above for real
+#' # examples. The following examples are just some basic guidelines.
+#'
+#' # Method 1 (recommended): use the pipe operator "\%>\%"
+#' generated.image = genai.model \%>\%
+#'   img(prompt = "A very cute panda eating bamboo.")
+#' generated.image
+#'
+#' # Method 2: use the reference operator "$"
+#' generated.image = genai.model$img(prompt = "A very cute sea otter on a rock.")
+#' generated.image
+#'
+#' # Method 3: use the function img() directly
+#' generated.image = img(genai.object = genai.model,
+#'                       prompt = "A very cute bear.")
+#' generated.image
+#' }
+#'
+#' @export
+img = function(genai.object,
+               prompt,
+               verbose = FALSE,
+               config = list()) {
+  genai.object$img(prompt,
+                   verbose,
+                   config)
+}
diff --git a/R/src/R/imports.R b/R/src/R/imports.R
new file mode 100644
index 0000000..849fdbd
--- /dev/null
+++ b/R/src/R/imports.R
@@ -0,0 +1,8 @@
+#' @import R6
+#' @importFrom jsonlite toJSON
+#' @importFrom httr GET POST add_headers content
+#' @importFrom listenv listenv
+#' @importFrom magrittr %>%
+#' @importFrom magick image_read
+#' @importFrom ggplotify as.ggplot
+NULL
diff --git a/R/src/R/txt.R b/R/src/R/txt.R
new file mode 100644
index 0000000..f550c46
--- /dev/null
+++ b/R/src/R/txt.R
@@ -0,0 +1,205 @@
+#' Text Generation with Text as the Input
+#'
+#' This function establishes a connection to a generative AI model through a generative AI object.
+#' It generates a text response based on the provided prompt.
+#'
+#' @param genai.object A generative AI object containing necessary and correct information.
+#' @param prompt A character string representing the query for text generation.
+#' @param verbose Optional. Default to \code{FALSE}. A boolean value determining whether or not to print
+#' out the details of the text request.
+#' @param config Optional. Default to \code{list()}. A list of configuration parameters for text generation.
+#'
+#' @return If successful, a text response will be returned. If the API response indicates
+#' an error, the function halts execution and provides an error message.
+#'
+#' @details Providing accurate and valid information for each argument is crucial for successful text
+#' generation by the generative AI model. If any parameter is incorrect, the function responds with an
+#' error message based on the API feedback. To view all supported generative AI models, use the
+#' function \code{\link{available.models}}.
+#'
+#' For \strong{Google Generative AI} models, available configurations are as follows. For more detail,
+#' please refer
+#' to \code{https://ai.google.dev/api/rest/v1/HarmCategory},
+#' \code{https://ai.google.dev/api/rest/v1/SafetySetting}, and
+#' \code{https://ai.google.dev/api/rest/v1/GenerationConfig}.
+#'
+#' \itemize{
+#' \item \code{harm.category.dangerous.content}
+#'
+#' Optional. An integer, from 1 to 5 inclusive, representing the threshold for dangerous content,
+#' with a higher value representing a lower probability of being blocked.
+#'
+#' \item \code{harm.category.harassment}
+#'
+#' Optional. An integer, from 1 to 5 inclusive, representing the threshold for harassment content,
+#' with a higher value representing a lower probability of being blocked.
+#'
+#' \item \code{harm.category.hate.speech}
+#'
+#' Optional. An integer, from 1 to 5 inclusive, representing the threshold for hate speech
+#' content, with a higher value representing a lower probability of being blocked.
+#'
+#' \item \code{harm.category.sexually.explicit}
+#'
+#' Optional. An integer, from 1 to 5 inclusive, representing the threshold for sexually explicit
+#' content, with a higher value representing a lower probability of being blocked.
+#'
+#' \item \code{stop.sequences}
+#'
+#' Optional. A list of character sequences (up to 5) that will stop output generation. If specified,
+#' the API will stop at the first appearance of a stop sequence. The stop sequence will not be
+#' included as part of the response.
+#'
+#' \item \code{max.output.tokens}
+#'
+#' Optional. An integer, value varies by model, representing maximum number of tokens to include
+#' in a candidate.
+#'
+#' \item \code{temperature}
+#'
+#' Optional. A number, from 0.0 to 1.0 inclusive, controlling the randomness of the output.
+#'
+#' \item \code{top.p}
+#'
+#' Optional. A number, value varies by model, representing maximum cumulative probability of tokens
+#' to consider when sampling.
+#'
+#' \item \code{top.k}
+#'
+#' Optional. A number, value varies by model, representing maximum number of tokens to consider when sampling.
+#' }
+#'
+#' For \strong{Moonshot AI} models, available configurations are as follows. For more detail, please refer to
+#' \code{https://platform.moonshot.cn/api.html#chat-completion}.
+#'
+#' \itemize{
+#' \item \code{max.tokens}
+#'
+#' Optional. An integer. The maximum number of tokens that will be generated when the chat completes.
+#' If the chat is not finished by the maximum number of tokens generated, the finish reason will be
+#' "length", otherwise it will be "stop".
+#'
+#' \item \code{temperature}
+#'
+#' Optional. A number. What sampling temperature to use, between 0 and 1. Higher values (e.g. 0.7) will
+#' make the output more random, while lower values (e.g. 0.2) will make it more focused and deterministic.
+#'
+#' \item \code{top.p}
+#'
+#' Optional. A number. An alternative to sampling with temperature (nucleus sampling).
+#' }
+#'
+#' For \strong{OpenAI} models, available configurations are as follows. For more detail, please refer to
+#' \code{https://platform.openai.com/docs/api-reference/chat/create}.
+#'
+#' \itemize{
+#' \item \code{frequency.penalty}
+#'
+#' Optional. A number from -2.0 to 2.0 inclusive. Positive values penalize new tokens based on their
+#' existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+#'
+#' \item \code{logit.bias}
+#'
+#' Optional. A map. Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object
+#' that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to
+#' 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact
+#' effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
+#' values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
+#'
+#' \item \code{logprobs}
+#'
+#' Optional. A boolean value. Whether to return log probabilities of the output tokens or not. If true, returns the log
+#' probabilities of each output token returned in the content of the message.
If true, returns the log +#' probabilities of each output token returned in the content of message +#' +#' \item \code{top.logprobs} +#' +#' Optional. An integer between 0 and 5 specifying the number of most likely tokens to return at each token +#' position, each with an associated log probability. \code{logprobs} must be set to \code{TRUE} if this +#' parameter is used. +#' +#' \item \code{max.tokens} +#' +#' Optional. An integer. The maximum number of tokens that can be generated in the chat completion. The total length of +#' input tokens and generated tokens is limited by the model's context length. +#' +#' \item \code{presence.penalty} +#' +#' Optional. A Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear +#' in the text so far, increasing the model's likelihood to talk about new topics. +#' +#' \item \code{response.format} +#' +#' Optional. An object specifying the format that the model must output. Compatible with GPT-4 Turbo and +#' all GPT-3.5 Turbo models newer than \code{gpt-3.5-turbo-1106}. +#' +#' \item \code{seed} +#' +#' Optional. An integer. If specified, our system will make a best effort to sample deterministically, such that repeated +#' requests with the same seed and parameters should return the same result. +#' +#' \item \code{stop} +#' +#' Optional. A character string or list contains up to 4 sequences where the API will stop generating further tokens. +#' +#' \item \code{temperature} +#' +#' Optional. A number. What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output +#' more random, while lower values like 0.2 will make it more focused and deterministic. +#' +#' \item \code{top.p} +#' +#' Optional. A number. An alternative to sampling with temperature, called nucleus sampling, where the model considers +#' the results of the tokens with \code{top.p} probability mass. So 0.1 means only the tokens comprising the top +#' 10% probability mass are considered. +#' +#' \item \code{tools} +#' +#' Optional. A list of tools the model may call. Currently, only functions are supported as a tool. Use this +#' to provide a list of functions the model may generate JSON inputs for. +#' +#' \item \code{tool.choice} +#' +#' Optional. A character string or object. Controls which (if any) function is called by the model. \code{none} means +#' the model will not call a function and instead generates a message. \code{auto} means the model can pick +#' between generating a message or calling a function. +#' +#' \item \code{user} +#' +#' Optional. A character string. A unique identifier representing your end-user, which can help OpenAI to monitor +#' and detect abuse. +#' } +#' +#' @seealso +#' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} +#' +#' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/txt.ipynb}{Live Demo in Colab} +#' +#' @examples +#' \dontrun{ +#' # Assuming there is a GenAI object named 'genai.model' supporting this +#' # function, please refer to the "Live Demo in Colab" above for real +#' # examples. The following examples are just some basic guidelines. 
+#'
+#' # Method 1 (recommended): use the pipe operator "\%>\%"
+#' genai.model \%>\%
+#'   txt(prompt = "Write a story about Mars in 50 words.") \%>\%
+#'   cat()
+#'
+#' # Method 2: use the reference operator "$"
+#' cat(genai.model$txt(prompt = "Write a story about Jupiter in 50 words."))
+#'
+#' # Method 3: use the function txt() directly
+#' # Set verbose to TRUE to see the details
+#' cat(txt(genai.object = genai.model,
+#'         prompt = "Write a story about Earth in 50 words."))
+#' }
+#'
+#' @export
+txt = function(genai.object,
+               prompt,
+               verbose = FALSE,
+               config = list()) {
+  genai.object$txt(prompt,
+                   verbose,
+                   config)
+}
diff --git a/R/src/R/txt.image.R b/R/src/R/txt.image.R
new file mode 100644
index 0000000..66d2d86
--- /dev/null
+++ b/R/src/R/txt.image.R
@@ -0,0 +1,193 @@
+#' Text Generation with Text and Image as the Input
+#'
+#' This function establishes a connection to a generative AI model through a generative AI object.
+#' It generates a text response based on the provided prompt and image.
+#'
+#' @param genai.object A generative AI object containing necessary and correct information.
+#' @param prompt A character string representing the query for text generation.
+#' @param image.path A character string representing the path to the image. It should be a link
+#' starting with \code{https}/\code{http} or a local directory path to an image.
+#' @param verbose Optional. Default to \code{FALSE}. A boolean value determining whether or not to print
+#' out the details of the text request.
+#' @param config Optional. Default to \code{list()}. A list of configuration parameters for text generation.
+#'
+#' @return If successful, a text response will be returned. If the API response indicates
+#' an error, the function halts execution and provides an error message.
+#'
+#' @details Providing accurate and valid information for each argument is crucial for successful text
+#' generation by the generative AI model. If any parameter is incorrect, the function responds with an
+#' error message based on the API feedback. To view all supported generative AI models, use the
+#' function \code{\link{available.models}}.
+#'
+#' For \strong{Google Generative AI} models, available configurations are as follows. For more detail,
+#' please refer
+#' to \code{https://ai.google.dev/api/rest/v1/HarmCategory},
+#' \code{https://ai.google.dev/api/rest/v1/SafetySetting}, and
+#' \code{https://ai.google.dev/api/rest/v1/GenerationConfig}.
+#'
+#' \itemize{
+#' \item \code{harm.category.dangerous.content}
+#'
+#' Optional. An integer, from 1 to 5 inclusive, representing the threshold for dangerous content,
+#' with a higher value representing a lower probability of being blocked.
+#'
+#' \item \code{harm.category.harassment}
+#'
+#' Optional. An integer, from 1 to 5 inclusive, representing the threshold for harassment content,
+#' with a higher value representing a lower probability of being blocked.
+#'
+#' \item \code{harm.category.hate.speech}
+#'
+#' Optional. An integer, from 1 to 5 inclusive, representing the threshold for hate speech
+#' content, with a higher value representing a lower probability of being blocked.
+#'
+#' \item \code{harm.category.sexually.explicit}
+#'
+#' Optional. An integer, from 1 to 5 inclusive, representing the threshold for sexually explicit
+#' content, with a higher value representing a lower probability of being blocked.
+#'
+#' \item \code{stop.sequences}
+#'
+#' Optional. A list of character sequences (up to 5) that will stop output generation. If specified,
+#' the API will stop at the first appearance of a stop sequence. The stop sequence will not be
+#' included as part of the response.
+#'
+#' \item \code{max.output.tokens}
+#'
+#' Optional. An integer, value varies by model, representing maximum number of tokens to include
+#' in a candidate.
+#'
+#' \item \code{temperature}
+#'
+#' Optional. A number, from 0.0 to 1.0 inclusive, controlling the randomness of the output.
+#'
+#' \item \code{top.p}
+#'
+#' Optional. A number, value varies by model, representing maximum cumulative probability of tokens
+#' to consider when sampling.
+#'
+#' \item \code{top.k}
+#'
+#' Optional. A number, value varies by model, representing maximum number of tokens to consider when sampling.
+#' }
+#'
+#' For \strong{OpenAI} models, available configurations are as follows. For more detail, please refer to
+#' \code{https://platform.openai.com/docs/api-reference/chat/create}.
+#'
+#' \itemize{
+#' \item \code{frequency.penalty}
+#'
+#' Optional. A number from -2.0 to 2.0 inclusive. Positive values penalize new tokens based on their
+#' existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+#'
+#' \item \code{logit.bias}
+#'
+#' Optional. A map. Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object
+#' that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to
+#' 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact
+#' effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
+#' values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
+#'
+#' \item \code{logprobs}
+#'
+#' Optional. A boolean value. Whether to return log probabilities of the output tokens or not. If true, returns the log
+#' probabilities of each output token returned in the content of the message.
+#'
+#' \item \code{top.logprobs}
+#'
+#' Optional. An integer between 0 and 5 specifying the number of most likely tokens to return at each token
+#' position, each with an associated log probability. \code{logprobs} must be set to \code{TRUE} if this
+#' parameter is used.
+#'
+#' \item \code{max.tokens}
+#'
+#' Optional. An integer. The maximum number of tokens that can be generated in the chat completion. The total length of
+#' input tokens and generated tokens is limited by the model's context length.
+#'
+#' \item \code{presence.penalty}
+#'
+#' Optional. A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
+#' in the text so far, increasing the model's likelihood to talk about new topics.
+#'
+#' \item \code{response.format}
+#'
+#' Optional. An object specifying the format that the model must output. Compatible with GPT-4 Turbo and
+#' all GPT-3.5 Turbo models newer than \code{gpt-3.5-turbo-1106}.
+#'
+#' \item \code{seed}
+#'
+#' Optional. An integer. If specified, the system will make a best effort to sample deterministically, such that repeated
+#' requests with the same seed and parameters should return the same result.
+#'
+#' \item \code{stop}
+#'
+#' Optional. A character string or list containing up to 4 sequences where the API will stop generating further tokens.
+#'
+#' \item \code{temperature}
+#'
+#' Optional. A number. What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
+#' more random, while lower values like 0.2 will make it more focused and deterministic.
+#'
+#' \item \code{top.p}
+#'
+#' Optional. A number. An alternative to sampling with temperature, called nucleus sampling, where the model considers
+#' the results of the tokens with \code{top.p} probability mass. So 0.1 means only the tokens comprising the top
+#' 10\% probability mass are considered.
+#'
+#' \item \code{tools}
+#'
+#' Optional. A list of tools the model may call. Currently, only functions are supported as a tool. Use this
+#' to provide a list of functions the model may generate JSON inputs for.
+#'
+#' \item \code{tool.choice}
+#'
+#' Optional. A character string or object. Controls which (if any) function is called by the model. \code{none} means
+#' the model will not call a function and instead generates a message. \code{auto} means the model can pick
+#' between generating a message or calling a function.
+#'
+#' \item \code{user}
+#'
+#' Optional. A character string. A unique identifier representing your end-user, which can help OpenAI to monitor
+#' and detect abuse.
+#' }
+#'
+#' @seealso
+#' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
+#'
+#' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/txt_image.ipynb}{Live Demo in Colab}
+#'
+#' @examples
+#' \dontrun{
+#' # Assuming there is a GenAI object named 'genai.model' supporting this
+#' # function, an image in your current directory named 'example.png', and
+#' # an online image 'https://example.com/example.png/', please refer to
+#' # the "Live Demo in Colab" above for real examples. The following examples
+#' # are just some basic guidelines.
+#'
+#' # Method 1 (recommended): use the pipe operator "\%>\%"
+#' genai.model \%>\%
+#'   txt.image(prompt = "Please describe the following image.",
+#'             image.path = "https://example.com/example.png/") \%>\%
+#'   cat()
+#'
+#' # Method 2: use the reference operator "$"
+#' cat(genai.model$txt.image(prompt = "Please describe the following image.",
+#'                           image.path = "https://example.com/example.png/"))
+#'
+#' # Method 3: use the function txt.image() directly
+#' cat(txt.image(genai.object = genai.model,
+#'               prompt = "Please describe the following image.",
+#'               image.path = "example.png"))
+#' }
+#'
+#' @export
+txt.image = function(genai.object,
+                     prompt,
+                     image.path,
+                     verbose = FALSE,
+                     config = list()) {
+  genai.object$txt.image(prompt,
+                         image.path,
+                         verbose,
+                         config)
+}
diff --git a/R/src/man/available.models.Rd b/R/src/man/available.models.Rd
new file mode 100644
index 0000000..a857b0d
--- /dev/null
+++ b/R/src/man/available.models.Rd
@@ -0,0 +1,37 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/available.models.R
+\name{available.models}
+\alias{available.models}
+\title{Get Supported Generative AI Models}
+\usage{
+available.models()
+}
+\value{
+If successful, the function returns a list containing generative AI
+service providers and their corresponding models. If the function encounters an error,
+it will halt execution and provide an error message.
+}
+\description{
+This function sends a request to the GenAI database API to retrieve information
+about available generative AI models.
+}
+\details{
+The function utilizes the GenAI database API to fetch the latest information about
+available Generative AI models. The retrieved data includes details about different models
+offered by various service providers.
+}
+\examples{
+\dontrun{
+# This function does not require a GenAI object. Please refer to the
+# "Live Demo in Colab" above for a real example. The following example
+# is just a basic guideline.
+
+all.models = available.models() \%>\% print()
+}
+
+}
+\seealso{
+\href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
+
+\href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/available_models.ipynb}{Live Demo in Colab}
+}
diff --git a/R/src/man/chat.Rd b/R/src/man/chat.Rd
new file mode 100644
index 0000000..3d92b88
--- /dev/null
+++ b/R/src/man/chat.Rd
@@ -0,0 +1,216 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/chat.R
+\name{chat}
+\alias{chat}
+\title{Chat Generation with Text as the Input}
+\usage{
+chat(genai.object, prompt, verbose = FALSE, config = list())
+}
+\arguments{
+\item{genai.object}{A generative AI object containing necessary and correct information.}
+
+\item{prompt}{A character string representing the query for chat generation.}
+
+\item{verbose}{Optional. Default to \code{FALSE}. A boolean value determining whether or not to print
+out the details of the chat request.}
+
+\item{config}{Optional. Default to \code{list()}. A list of configuration parameters for chat generation.}
+}
+\value{
+If successful, the most recent chat response will be returned. If the API response indicates
+an error, the function halts execution and provides an error message.
+}
+\description{
+This function establishes a connection to a generative AI model through a generative AI object.
+It generates a chat response based on the provided prompt and stores it in the chat history along
+with the generative AI object.
+}
+\details{
+Providing accurate and valid information for each argument is crucial for successful chat
+generation by the generative AI model. If any parameter is incorrect, the function responds with an
+error message based on the API feedback. To view all supported generative AI models, use the
+function \code{\link{available.models}}.
+
+In addition, this function modifies the chat history along with the generative AI object directly,
+meaning the chat history is mutable. You can print out the chat history using the
+function \code{\link{chat.history.print}} or simply use \code{verbose = TRUE} in this function. If you
+want to edit a message, use the function \code{\link{chat.edit}}. To reset the chat history along with
+the generative AI object, use the function \code{\link{chat.history.reset}}.
+
+For \strong{Google Generative AI} models, available configurations are as follows. For more detail,
+please refer
+to \code{https://ai.google.dev/api/rest/v1/HarmCategory},
+\code{https://ai.google.dev/api/rest/v1/SafetySetting}, and
+\code{https://ai.google.dev/api/rest/v1/GenerationConfig}.
+
+\itemize{
+  \item \code{harm.category.dangerous.content}
+
+  Optional. An integer, from 1 to 5 inclusive, representing the threshold for dangerous content,
+  with a higher value representing a lower probability of being blocked.
+
+  \item \code{harm.category.harassment}
+
+  Optional. An integer, from 1 to 5 inclusive, representing the threshold for harassment content,
+  with a higher value representing a lower probability of being blocked.
+
+  \item \code{harm.category.hate.speech}
+
+  Optional. An integer, from 1 to 5 inclusive, representing the threshold for hate speech
+  content, with a higher value representing a lower probability of being blocked.
+
+  \item \code{harm.category.sexually.explicit}
+
+  Optional. An integer, from 1 to 5 inclusive, representing the threshold for sexually explicit
+  content, with a higher value representing a lower probability of being blocked.
+
+  \item \code{stop.sequences}
+
+  Optional. A list of character sequences (up to 5) that will stop output generation. If specified,
+  the API will stop at the first appearance of a stop sequence. The stop sequence will not be
+  included as part of the response.
+
+  \item \code{max.output.tokens}
+
+  Optional. An integer, value varies by model, representing maximum number of tokens to include
+  in a candidate.
+
+  \item \code{temperature}
+
+  Optional. A number, from 0.0 to 1.0 inclusive, controlling the randomness of the output.
+
+  \item \code{top.p}
+
+  Optional. A number, value varies by model, representing maximum cumulative probability of tokens
+  to consider when sampling.
+
+  \item \code{top.k}
+
+  Optional. A number, value varies by model, representing maximum number of tokens to consider when sampling.
+}
+
+For \strong{Moonshot AI} models, available configurations are as follows. For more detail, please refer to
+\code{https://platform.moonshot.cn/api.html#chat-completion}.
+
+\itemize{
+  \item \code{max.tokens}
+
+  Optional. An integer. The maximum number of tokens that will be generated when the chat completes.
+  If the chat is not finished by the maximum number of tokens generated, the finish reason will be
+  "length", otherwise it will be "stop".
+
+  \item \code{temperature}
+
+  Optional. A number. What sampling temperature to use, between 0 and 1. Higher values (e.g. 0.7) will
+  make the output more random, while lower values (e.g. 0.2) will make it more focused and deterministic.
+
+  \item \code{top.p}
+
+  Optional. A number. An alternative to sampling with temperature (nucleus sampling).
+}
+
+For \strong{OpenAI} models, available configurations are as follows. For more detail, please refer to
+\code{https://platform.openai.com/docs/api-reference/chat/create}.
+
+\itemize{
+  \item \code{frequency.penalty}
+
+  Optional. A number from -2.0 to 2.0 inclusive. Positive values penalize new tokens based on their
+  existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+
+  \item \code{logit.bias}
+
+  Optional. A map. Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object
+  that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to
+  100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact
+  effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
+  values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
+
+  \item \code{logprobs}
+
+  Optional. A boolean value. Whether to return log probabilities of the output tokens or not. If true, returns the log
+  probabilities of each output token returned in the content of the message.
+
+  \item \code{top.logprobs}
+
+  Optional. An integer between 0 and 5 specifying the number of most likely tokens to return at each token
+  position, each with an associated log probability. \code{logprobs} must be set to \code{TRUE} if this
+  parameter is used.
+
+  \item \code{max.tokens}
+
+  Optional. An integer. The maximum number of tokens that can be generated in the chat completion. The total length of
+  input tokens and generated tokens is limited by the model's context length.
+
+  \item \code{presence.penalty}
+
+  Optional. A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
+  in the text so far, increasing the model's likelihood to talk about new topics.
+
+  \item \code{response.format}
+
+  Optional. An object specifying the format that the model must output. Compatible with GPT-4 Turbo and
+  all GPT-3.5 Turbo models newer than \code{gpt-3.5-turbo-1106}.
+
+  \item \code{seed}
+
+  Optional. An integer. If specified, the system will make a best effort to sample deterministically, such that repeated
+  requests with the same seed and parameters should return the same result.
+
+  \item \code{stop}
+
+  Optional. A character string or list containing up to 4 sequences where the API will stop generating further tokens.
+
+  \item \code{temperature}
+
+  Optional. A number. What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
+  more random, while lower values like 0.2 will make it more focused and deterministic.
+
+  \item \code{top.p}
+
+  Optional. A number. An alternative to sampling with temperature, called nucleus sampling, where the model considers
+  the results of the tokens with \code{top.p} probability mass. So 0.1 means only the tokens comprising the top
+  10\% probability mass are considered.
+
+  \item \code{tools}
+
+  Optional. A list of tools the model may call. Currently, only functions are supported as a tool. Use this
+  to provide a list of functions the model may generate JSON inputs for.
+
+  \item \code{tool.choice}
+
+  Optional. A character string or object. Controls which (if any) function is called by the model. \code{none} means
+  the model will not call a function and instead generates a message. \code{auto} means the model can pick
+  between generating a message or calling a function.
+
+  \item \code{user}
+
+  Optional. A character string. A unique identifier representing your end-user, which can help OpenAI to monitor
+  and detect abuse.
+}
+}
+\examples{
+\dontrun{
+# Assuming there is a GenAI object named 'genai.model' supporting this
+# function, please refer to the "Live Demo in Colab" above for real
+# examples. The following examples are just some basic guidelines.
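+
+# Optionally, pass a configuration list through 'config' and set
+# verbose = TRUE to print the details of the request. The parameter
+# values below are illustrative only; see the Details section.
+parameters = list(temperature = 0.7, max.tokens = 256)
+cat(chat(genai.object = genai.model,
+         prompt = "Write a story about Neptune in 50 words.",
+         verbose = TRUE,
+         config = parameters))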
+
+# Method 1 (recommended): use the pipe operator "\%>\%"
+genai.model \%>\%
+  chat(prompt = "Write a story about Mars in 50 words.") \%>\%
+  cat()
+
+# Method 2: use the reference operator "$"
+cat(genai.model$chat(prompt = "Write a story about Jupiter in 50 words."))
+
+# Method 3: use the function chat() directly
+cat(chat(genai.object = genai.model,
+         prompt = "Summarize the chat."))
+}
+
+}
+\seealso{
+\href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
+
+\href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat.ipynb}{Live Demo in Colab}
+}
diff --git a/R/src/man/chat.edit.Rd b/R/src/man/chat.edit.Rd
new file mode 100644
index 0000000..ae3004e
--- /dev/null
+++ b/R/src/man/chat.edit.Rd
@@ -0,0 +1,228 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/chat.edit.R
+\name{chat.edit}
+\alias{chat.edit}
+\title{Chat Edit with New Text as the Input}
+\usage{
+chat.edit(
+  genai.object,
+  prompt,
+  message.to.edit,
+  verbose = FALSE,
+  config = list()
+)
+}
+\arguments{
+\item{genai.object}{A generative AI object containing necessary and correct information.}
+
+\item{prompt}{A character string representing the query for chat generation.}
+
+\item{message.to.edit}{An integer representing the index of the message to be edited.}
+
+\item{verbose}{Optional. Default to \code{FALSE}. A boolean value determining whether or not to print
+out the details of the chat request.}
+
+\item{config}{Optional. Default to \code{list()}. A list of configuration parameters for chat generation.}
+}
+\value{
+If successful, the most recent chat response will be returned. If the API response indicates
+an error, the function halts execution and provides an error message.
+}
+\description{
+This function establishes a connection to a generative AI model through a generative AI object.
+It generates a chat response based on the new prompt and stores it in the chat history along
+with the generative AI object.
+}
+\details{
+Providing accurate and valid information for each argument is crucial for successful chat
+generation by the generative AI model. If any parameter is incorrect, the function responds with an
+error message based on the API feedback. To view all supported generative AI models, use the
+function \code{\link{available.models}}.
+
+In addition, this function modifies the chat history along with the generative AI object directly,
+meaning the chat history is mutable. You can print out the chat history using the
+function \code{\link{chat.history.print}} or simply use \code{verbose = TRUE} in this function. To reset
+the chat history along with the generative AI object, use the function \code{\link{chat.history.reset}}.
+
+For \strong{Google Generative AI} models, available configurations are as follows. For more detail,
+please refer
+to \code{https://ai.google.dev/api/rest/v1/HarmCategory},
+\code{https://ai.google.dev/api/rest/v1/SafetySetting}, and
+\code{https://ai.google.dev/api/rest/v1/GenerationConfig}.
+
+\itemize{
+  \item \code{harm.category.dangerous.content}
+
+  Optional. An integer, from 1 to 5 inclusive, representing the threshold for dangerous content,
+  with a higher value representing a lower probability of being blocked.
+
+  \item \code{harm.category.harassment}
+
+  Optional. An integer, from 1 to 5 inclusive, representing the threshold for harassment content,
+  with a higher value representing a lower probability of being blocked.
+
+  \item \code{harm.category.hate.speech}
+
+  Optional. An integer, from 1 to 5 inclusive, representing the threshold for hate speech
+  content, with a higher value representing a lower probability of being blocked.
+
+  \item \code{harm.category.sexually.explicit}
+
+  Optional. An integer, from 1 to 5 inclusive, representing the threshold for sexually explicit
+  content, with a higher value representing a lower probability of being blocked.
+
+  \item \code{stop.sequences}
+
+  Optional. A list of character sequences (up to 5) that will stop output generation. If specified,
+  the API will stop at the first appearance of a stop sequence. The stop sequence will not be
+  included as part of the response.
+
+  \item \code{max.output.tokens}
+
+  Optional. An integer, value varies by model, representing maximum number of tokens to include
+  in a candidate.
+
+  \item \code{temperature}
+
+  Optional. A number, from 0.0 to 1.0 inclusive, controlling the randomness of the output.
+
+  \item \code{top.p}
+
+  Optional. A number, value varies by model, representing maximum cumulative probability of tokens
+  to consider when sampling.
+
+  \item \code{top.k}
+
+  Optional. A number, value varies by model, representing maximum number of tokens to consider when sampling.
+}
+
+For \strong{Moonshot AI} models, available configurations are as follows. For more detail, please refer to
+\code{https://platform.moonshot.cn/api.html#chat-completion}.
+
+\itemize{
+  \item \code{max.tokens}
+
+  Optional. An integer. The maximum number of tokens that will be generated when the chat completes.
+  If the chat is not finished by the maximum number of tokens generated, the finish reason will be
+  "length", otherwise it will be "stop".
+
+  \item \code{temperature}
+
+  Optional. A number. What sampling temperature to use, between 0 and 1. Higher values (e.g. 0.7) will
+  make the output more random, while lower values (e.g. 0.2) will make it more focused and deterministic.
+
+  \item \code{top.p}
+
+  Optional. A number. An alternative to sampling with temperature (nucleus sampling).
+}
+
+For \strong{OpenAI} models, available configurations are as follows. For more detail, please refer to
+\code{https://platform.openai.com/docs/api-reference/chat/create}.
+
+\itemize{
+  \item \code{frequency.penalty}
+
+  Optional. A number from -2.0 to 2.0 inclusive. Positive values penalize new tokens based on their
+  existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+
+  \item \code{logit.bias}
+
+  Optional. A map. Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object
+  that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to
+  100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact
+  effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
+  values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
+
+  \item \code{logprobs}
+
+  Optional. A boolean value. Whether to return log probabilities of the output tokens or not. If true, returns the log
+  probabilities of each output token returned in the content of the message.
+
+  \item \code{top.logprobs}
+
+  Optional. An integer between 0 and 5 specifying the number of most likely tokens to return at each token
+  position, each with an associated log probability. \code{logprobs} must be set to \code{TRUE} if this
+  parameter is used.
+
+  \item \code{max.tokens}
+
+  Optional. An integer. The maximum number of tokens that can be generated in the chat completion. The total length of
+  input tokens and generated tokens is limited by the model's context length.
+
+  \item \code{presence.penalty}
+
+  Optional. A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
+  in the text so far, increasing the model's likelihood to talk about new topics.
+
+  \item \code{response.format}
+
+  Optional. An object specifying the format that the model must output. Compatible with GPT-4 Turbo and
+  all GPT-3.5 Turbo models newer than \code{gpt-3.5-turbo-1106}.
+
+  \item \code{seed}
+
+  Optional. An integer. If specified, the system will make a best effort to sample deterministically, such that repeated
+  requests with the same seed and parameters should return the same result.
+
+  \item \code{stop}
+
+  Optional. A character string or list containing up to 4 sequences where the API will stop generating further tokens.
+
+  \item \code{temperature}
+
+  Optional. A number. What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
+  more random, while lower values like 0.2 will make it more focused and deterministic.
+
+  \item \code{top.p}
+
+  Optional. A number. An alternative to sampling with temperature, called nucleus sampling, where the model considers
+  the results of the tokens with \code{top.p} probability mass. So 0.1 means only the tokens comprising the top
+  10\% probability mass are considered.
+
+  \item \code{tools}
+
+  Optional. A list of tools the model may call. Currently, only functions are supported as a tool. Use this
+  to provide a list of functions the model may generate JSON inputs for.
+
+  \item \code{tool.choice}
+
+  Optional. A character string or object. Controls which (if any) function is called by the model. \code{none} means
+  the model will not call a function and instead generates a message. \code{auto} means the model can pick
+  between generating a message or calling a function.
+
+  \item \code{user}
+
+  Optional. A character string. A unique identifier representing your end-user, which can help OpenAI to monitor
+  and detect abuse.
+}
+}
+\examples{
+\dontrun{
+# Assuming there is a GenAI object named 'genai.model' supporting this
+# function, please refer to the "Live Demo in Colab" above for real
+# examples. The following examples are just some basic guidelines.
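+
+# The 'parameters' list used by Method 1 below is not predefined; create it
+# first with any supported configurations from the Details section, e.g.:
+parameters = list(temperature = 0.2, max.tokens = 100)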
+ +# Method 1 (recommended): use the pipe operator "\%>\%" +genai.model \%>\% + chat.edit(prompt = "What is XGBoost?", + message.to.edit = 5, + verbose = TRUE, + config = parameters) \%>\% + cat() + +# Method 2: use the reference operator "$" +cat(genai.model$chat.edit(prompt = "What is CatBoost?", + message.to.edit = 3)) + +# Method 3: use the function chat.edit() directly +cat(chat.edit(genai.object = genai.model, + prompt = "What is LightGBM?", + message.to.edit = 1)) +} + +} +\seealso{ +\href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} + +\href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_edit.ipynb}{Live Demo in Colab} +} diff --git a/R/src/man/chat.history.convert.Rd b/R/src/man/chat.history.convert.Rd new file mode 100644 index 0000000..792c41d --- /dev/null +++ b/R/src/man/chat.history.convert.Rd @@ -0,0 +1,51 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/chat.history.convert.R +\name{chat.history.convert} +\alias{chat.history.convert} +\title{Chat History Convert} +\usage{ +chat.history.convert(from.genai.object, to.genai.object) +} +\arguments{ +\item{from.genai.object}{A source generative AI object containing necessary and correct information.} + +\item{to.genai.object}{A target generative AI object containing necessary and correct information.} +} +\value{ +If successful, the converted chat history list will be returned. +} +\description{ +This function converts the chat history along with a generative AI object to a valid format +for another generative AI object. +} +\details{ +Providing accurate and valid information for each argument is crucial for successful chat +generation by the generative AI model. If any parameter is incorrect, the function responds with an +error message based on the API feedback. To view all supported generative AI models, use the +function \code{\link{available.models}}. Moreover, you can print out the chat history using the +function \code{\link{chat.history.print}} or simply use \code{verbose = TRUE} during the chat. +} +\examples{ +\dontrun{ +# Assuming there are two GenAI objects named 'genai.model' and 'another.genai.model' +# supporting this function, please refer to the "Live Demo in Colab" above for +# real examples. The following examples are just some basic guidelines. 
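+
+# Note: the converted history is returned as a list; it can afterwards be
+# loaded into the target object with chat.history.import().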
+ +# Method 1 (recommended): use the pipe operator "\%>\%" +converted.history = genai.model \%>\% + chat.history.convert(to.genai.object = another.genai.model) + +# Method 2: use the reference operator "$" +converted.history = genai.model$chat.history.convert(to.genai.object = another.genai.model) + +# Method 3: use the function chat.history.convert() directly +converted.history = chat.history.convert(from.genai.object = genai.model, + to.genai.object = another.genai.model) +} + +} +\seealso{ +\href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} + +\href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_convert.ipynb}{Live Demo in Colab} +} diff --git a/R/src/man/chat.history.export.Rd b/R/src/man/chat.history.export.Rd new file mode 100644 index 0000000..3f0f0c4 --- /dev/null +++ b/R/src/man/chat.history.export.Rd @@ -0,0 +1,46 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/chat.history.export.R +\name{chat.history.export} +\alias{chat.history.export} +\title{Chat History Export} +\usage{ +chat.history.export(genai.object) +} +\arguments{ +\item{genai.object}{A generative AI object containing necessary and correct information.} +} +\value{ +If successful, the chat history list will be returned. +} +\description{ +This function exports the chat history along with a generative AI object as a list. +} +\details{ +Providing accurate and valid information for each argument is crucial for successful chat +generation by the generative AI model. If any parameter is incorrect, the function responds with an +error message based on the API feedback. To view all supported generative AI models, use the +function \code{\link{available.models}}. +} +\examples{ +\dontrun{ +# Assuming there is a GenAI object named 'genai.model' supporting this +# function, please refer to the "Live Demo in Colab" above for real +# examples. The following examples are just some basic guidelines. + +# Method 1 (recommended): use the pipe operator "\%>\%" +exported.history = genai.model \%>\% + chat.history.export() + +# Method 2: use the reference operator "$" +exported.history = genai.model$chat.history.export() + +# Method 3: use the function chat.history.export() directly +exported.history = chat.history.export(genai.object = genai.model) +} + +} +\seealso{ +\href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} + +\href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_export.ipynb}{Live Demo in Colab} +} diff --git a/R/src/man/chat.history.import.Rd b/R/src/man/chat.history.import.Rd new file mode 100644 index 0000000..c304db8 --- /dev/null +++ b/R/src/man/chat.history.import.Rd @@ -0,0 +1,47 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/chat.history.import.R +\name{chat.history.import} +\alias{chat.history.import} +\title{Chat History Import} +\usage{ +chat.history.import(genai.object, new.chat.history) +} +\arguments{ +\item{genai.object}{A generative AI object containing necessary and correct information.} + +\item{new.chat.history}{A list containing a chat history in correct format.} +} +\description{ +This function imports a chat history in list format to a generative AI object. +} +\details{ +Providing accurate and valid information for each argument is crucial for successful chat +generation by the generative AI model. 
+
+# Method 1 (recommended): use the pipe operator "\%>\%"
+genai.model \%>\%
+  chat.history.import(new.chat.history = new.history)
+
+# Method 2: use the reference operator "$"
+genai.model$chat.history.import(new.chat.history = new.history)
+
+# Method 3: use the function chat.history.import() directly
+chat.history.import(genai.object = genai.model,
+                    new.chat.history = new.history)
+}
+
+}
+\seealso{
+\href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
+
+\href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_import.ipynb}{Live Demo in Colab}
+}
diff --git a/R/src/man/chat.history.print.Rd b/R/src/man/chat.history.print.Rd
new file mode 100644
index 0000000..189c13d
--- /dev/null
+++ b/R/src/man/chat.history.print.Rd
@@ -0,0 +1,51 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/chat.history.print.R
+\name{chat.history.print}
+\alias{chat.history.print}
+\title{Chat History Print}
+\usage{
+chat.history.print(genai.object, from = 1, to = NULL)
+}
+\arguments{
+\item{genai.object}{A generative AI object containing necessary and correct information.}
+
+\item{from}{Optional. Defaults to 1. An integer representing the first message in the chat history that needs
+to be printed.}
+
+\item{to}{Optional. Defaults to \code{NULL}, which prints until the last message in the chat history. An integer
+representing the last message in the chat history that needs to be printed.}
+}
+\description{
+This function prints out the chat history along with a generative AI object.
+}
+\details{
+Providing accurate and valid information for each argument is crucial for successful chat
+generation by the generative AI model. If any parameter is incorrect, the function responds with an
+error message based on the API feedback. To view all supported generative AI models, use the
+function \code{\link{available.models}}.
+}
+\examples{
+\dontrun{
+# Assuming there is a GenAI object named 'genai.model' supporting this
+# function, please refer to the "Live Demo in Colab" above for real
+# examples. The following examples are just some basic guidelines.
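+
+# A hedged sketch, assuming chat() accepts a 'prompt' argument the way
+# chat.edit() does: the history only has messages to print after some
+# chat calls, and 'from'/'to' then index those messages by position.
+# genai.model \%>\% chat(prompt = "What is machine learning?")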
+
+# Method 1 (recommended): use the pipe operator "\%>\%"
+genai.model \%>\%
+  chat.history.print()
+
+# Method 2: use the reference operator "$"
+genai.model$chat.history.print(from = 3)
+
+# Method 3: use the function chat.history.print() directly
+chat.history.print(genai.object = genai.model,
+                   from = 3,
+                   to = 5)
+}
+
+}
+\seealso{
+\href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
+
+\href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_print.ipynb}{Live Demo in Colab}
+}
diff --git a/R/src/man/chat.history.reset.Rd b/R/src/man/chat.history.reset.Rd
new file mode 100644
index 0000000..ea6baaf
--- /dev/null
+++ b/R/src/man/chat.history.reset.Rd
@@ -0,0 +1,43 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/chat.history.reset.R
+\name{chat.history.reset}
+\alias{chat.history.reset}
+\title{Chat History Reset}
+\usage{
+chat.history.reset(genai.object)
+}
+\arguments{
+\item{genai.object}{A generative AI object containing necessary and correct information.}
+}
+\description{
+This function resets the chat history along with a generative AI object.
+}
+\details{
+Providing accurate and valid information for each argument is crucial for successful chat
+generation by the generative AI model. If any parameter is incorrect, the function responds with an
+error message based on the API feedback. To view all supported generative AI models, use the
+function \code{\link{available.models}}.
+}
+\examples{
+\dontrun{
+# Assuming there is a GenAI object named 'genai.model' supporting this
+# function, please refer to the "Live Demo in Colab" above for real
+# examples. The following examples are just some basic guidelines.
+
+# Method 1 (recommended): use the pipe operator "\%>\%"
+genai.model \%>\%
+  chat.history.reset()
+
+# Method 2: use the reference operator "$"
+genai.model$chat.history.reset()
+
+# Method 3: use the function chat.history.reset() directly
+chat.history.reset(genai.object = genai.model)
+}
+
+}
+\seealso{
+\href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
+
+\href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_reset.ipynb}{Live Demo in Colab}
+}
diff --git a/R/src/man/chat.history.save.Rd b/R/src/man/chat.history.save.Rd
new file mode 100644
index 0000000..191dd97
--- /dev/null
+++ b/R/src/man/chat.history.save.Rd
@@ -0,0 +1,50 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/chat.history.save.R
+\name{chat.history.save}
+\alias{chat.history.save}
+\title{Chat History Save}
+\usage{
+chat.history.save(genai.object, file.name)
+}
+\arguments{
+\item{genai.object}{A generative AI object containing necessary and correct information.}
+
+\item{file.name}{A character string representing the name of the JSON file for the chat history.}
+}
+\value{
+If successful, the chat history will be saved as a JSON file in your current or specified
+directory.
+}
+\description{
+This function saves a chat history along with a generative AI object as a JSON file.
+}
+\details{
+Providing accurate and valid information for each argument is crucial for successful chat
+generation by the generative AI model. If any parameter is incorrect, the function responds with an
+error message based on the API feedback. To view all supported generative AI models, use the
+function \code{\link{available.models}}.
+}
+\examples{
+\dontrun{
+# Assuming there is a GenAI object named 'genai.model' supporting this
+# function, please refer to the "Live Demo in Colab" above for real
+# examples. The following examples are just some basic guidelines.
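+
+# A hedged sketch (hypothetical path): file.name may include a directory,
+# so the JSON file is written somewhere other than the working directory.
+# genai.model \%>\% chat.history.save(file.name = "backups/history_2024_02_15")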
+
+# Method 1 (recommended): use the pipe operator "\%>\%"
+genai.model \%>\%
+  chat.history.save(file.name = "saved_history")
+
+# Method 2: use the reference operator "$"
+genai.model$chat.history.save(file.name = "saved_history")
+
+# Method 3: use the function chat.history.save() directly
+chat.history.save(genai.object = genai.model,
+                  file.name = "saved_history")
+}
+
+}
+\seealso{
+\href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
+
+\href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_save.ipynb}{Live Demo in Colab}
+}
diff --git a/R/src/man/genai.google.Rd b/R/src/man/genai.google.Rd
new file mode 100644
index 0000000..e49e5d2
--- /dev/null
+++ b/R/src/man/genai.google.Rd
@@ -0,0 +1,67 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/genai.google.R
+\name{genai.google}
+\alias{genai.google}
+\title{Google Generative AI Object Creation}
+\usage{
+genai.google(api, model, version, proxy = FALSE)
+}
+\arguments{
+\item{api}{A character string representing the API key required for accessing the model.}
+
+\item{model}{A character string representing the specific model.}
+
+\item{version}{A character string representing the version of the chosen model.}
+
+\item{proxy}{Optional. Defaults to \code{FALSE}. A boolean value indicating whether to use a
+proxy for accessing the API URL. If your local internet cannot access the API, set this
+parameter to \code{TRUE}.}
+}
+\value{
+If successful, the function returns a Google generative AI object. If the API response
+indicates an error, the function halts execution and provides an error message.
+}
+\description{
+This function establishes a connection to a Google generative AI model by providing essential
+parameters.
+}
+\details{
+Providing accurate and valid information for each argument is crucial for successful text
+generation by the generative AI model. If any parameter is incorrect, the function responds with an
+error message based on the API feedback. To view all supported generative AI models, use the
+function \code{\link{available.models}}.
+
+Please refer to \code{https://ai.google.dev/tutorials/setup} for the API key.
+
+The API proxy service is designed to address the needs of users who hold a valid API key but find
+themselves outside their home countries or regions due to reasons such as travel, work, or study
+in locations that may not be covered by certain Generative AI service providers.
+
+Please be aware that although GenAI and its affiliated organization - GitData - do not gather user
+information through this service, the server providers for GenAI API proxy service and the Generative
+AI service providers may engage in such data collection. Furthermore, the proxy service cannot
+guarantee a consistent connection speed. Users are strongly encouraged to utilize this service
+with caution and at their own discretion.
+}
+\examples{
+\dontrun{
+# Please change YOUR_GOOGLE_API to your own API key of Google Generative AI
Sys.setenv(GOOGLE_API = "YOUR_GOOGLE_API")
+
+all.models = available.models() \%>\% print()
+
+# Create a Google Generative AI object
+google = genai.google(api = Sys.getenv("GOOGLE_API"),
+                      model = all.models$google$model[1],
+                      version = all.models$google$version[1],
+                      proxy = FALSE)
+}
+
+}
+\seealso{
+\href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
+
+\href{https://genai.gd.edu.kg/api/}{GenAI - Generative Artificial Intelligence API Proxy Service}
+
+\href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/genai_google.ipynb}{Live Demo in Colab}
+}
diff --git a/R/src/man/genai.moonshot.Rd b/R/src/man/genai.moonshot.Rd
new file mode 100644
index 0000000..9cf4f34
--- /dev/null
+++ b/R/src/man/genai.moonshot.Rd
@@ -0,0 +1,66 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/genai.moonshot.R
+\name{genai.moonshot}
+\alias{genai.moonshot}
+\title{Moonshot AI Object Creation}
+\usage{
+genai.moonshot(api, model, version, proxy = FALSE)
+}
+\arguments{
+\item{api}{A character string representing the API key required for accessing the model.}
+
+\item{model}{A character string representing the specific model.}
+
+\item{version}{A character string representing the version of the chosen model.}
+
+\item{proxy}{Optional. Defaults to \code{FALSE}. A boolean value indicating whether to use a
+proxy for accessing the API URL. If your local internet cannot access the API, set this
+parameter to \code{TRUE}.}
+}
+\value{
+If successful, the function returns a Moonshot AI object. If the API response
+indicates an error, the function halts execution and provides an error message.
+}
+\description{
+This function establishes a connection to a Moonshot AI model by providing essential parameters.
+}
+\details{
+Providing accurate and valid information for each argument is crucial for successful text
+generation by the generative AI model. If any parameter is incorrect, the function responds with an
+error message based on the API feedback. To view all supported generative AI models, use the
+function \code{\link{available.models}}.
+
+Please refer to \code{https://platform.moonshot.cn/console/api-keys} for the API key.
+
+The API proxy service is designed to address the needs of users who hold a valid API key but find
+themselves outside their home countries or regions due to reasons such as travel, work, or study
+in locations that may not be covered by certain Generative AI service providers.
+
+Please be aware that although GenAI and its affiliated organization - GitData - do not gather user
+information through this service, the server providers for GenAI API proxy service and the Generative
+AI service providers may engage in such data collection. Furthermore, the proxy service cannot
+guarantee a consistent connection speed. Users are strongly encouraged to utilize this service
+with caution and at their own discretion.
+}
+\examples{
+\dontrun{
+# Please change YOUR_MOONSHOT_API to your own API key of Moonshot AI
+Sys.setenv(MOONSHOT_API = "YOUR_MOONSHOT_API")
+
+all.models = available.models() \%>\% print()
+
+# Create a Moonshot AI object
+moonshot = genai.moonshot(api = Sys.getenv("MOONSHOT_API"),
+                          model = all.models$moonshot$model[1],
+                          version = all.models$moonshot$version[1],
+                          proxy = FALSE)
+}
+
+}
+\seealso{
+\href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
+
+\href{https://genai.gd.edu.kg/api/}{GenAI - Generative Artificial Intelligence API Proxy Service}
+
+\href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/genai_moonshot.ipynb}{Live Demo in Colab}
+}
diff --git a/R/src/man/genai.openai.Rd b/R/src/man/genai.openai.Rd
new file mode 100644
index 0000000..106b8f9
--- /dev/null
+++ b/R/src/man/genai.openai.Rd
@@ -0,0 +1,74 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/genai.openai.R
+\name{genai.openai}
+\alias{genai.openai}
+\title{OpenAI Object Creation}
+\usage{
+genai.openai(api, model, version, proxy = FALSE, organization.id = NULL)
+}
+\arguments{
+\item{api}{A character string representing the API key required for accessing the model.}
+
+\item{model}{A character string representing the specific model.}
+
+\item{version}{A character string representing the version of the chosen model.}
+
+\item{proxy}{Optional. Defaults to \code{FALSE}. A boolean value indicating whether to use a
+proxy for accessing the API URL. If your local internet cannot access the API, set this
+parameter to \code{TRUE}.}
+
+\item{organization.id}{Optional. Defaults to \code{NULL}. A character string representing the
+organization ID.}
+}
+\value{
+If successful, the function returns an OpenAI object. If the API response
+indicates an error, the function halts execution and provides an error message.
+}
+\description{
+This function establishes a connection to an OpenAI model by providing essential parameters.
+}
+\details{
+Providing accurate and valid information for each argument is crucial for successful text
+generation by the generative AI model. If any parameter is incorrect, the function responds with an
+error message based on the API feedback. To view all supported generative AI models, use the
+function \code{\link{available.models}}.
+
+Please refer to \code{https://platform.openai.com/api-keys} for the API key. Moreover, please refer
+to \code{https://platform.openai.com/account/organization} for the optional organization ID.
+
+The API proxy service is designed to address the needs of users who hold a valid API key but find
+themselves outside their home countries or regions due to reasons such as travel, work, or study
+in locations that may not be covered by certain Generative AI service providers.
+
+Please be aware that although GenAI and its affiliated organization - GitData - do not gather user
+information through this service, the server providers for GenAI API proxy service and the Generative
+AI service providers may engage in such data collection. Furthermore, the proxy service cannot
+guarantee a consistent connection speed. Users are strongly encouraged to utilize this service
+with caution and at their own discretion.
+}
+\examples{
+\dontrun{
+# Please change YOUR_OPENAI_API to your own API key of OpenAI
+Sys.setenv(OPENAI_API = "YOUR_OPENAI_API")
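+
+# A hedged aside (standard R, not specific to this package): keys can be
+# kept out of scripts by adding a line such as OPENAI_API=YOUR_OPENAI_API
+# to ~/.Renviron; Sys.getenv("OPENAI_API") then finds it in a new session.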
+
+# Optional. Please change YOUR_OPENAI_ORG to your own organization ID for OpenAI
+Sys.setenv(OPENAI_ORG = "YOUR_OPENAI_ORG")
+
+all.models = available.models() \%>\% print()
+
+# Create an OpenAI object
+openai = genai.openai(api = Sys.getenv("OPENAI_API"),
+                      model = all.models$openai$model[1],
+                      version = all.models$openai$version[1],
+                      proxy = FALSE,
+                      organization.id = Sys.getenv("OPENAI_ORG"))
+}
+
+}
+\seealso{
+\href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
+
+\href{https://genai.gd.edu.kg/api/}{GenAI - Generative Artificial Intelligence API Proxy Service}
+
+\href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/genai_openai.ipynb}{Live Demo in Colab}
+}
diff --git a/R/src/man/img.Rd b/R/src/man/img.Rd
new file mode 100644
index 0000000..dd97ec6
--- /dev/null
+++ b/R/src/man/img.Rd
@@ -0,0 +1,88 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/img.R
+\name{img}
+\alias{img}
+\title{Image Generation with Text as the Input}
+\usage{
+img(genai.object, prompt, verbose = FALSE, config = list())
+}
+\arguments{
+\item{genai.object}{A generative AI object containing necessary and correct information.}
+
+\item{prompt}{A character string representing the query for image generation.}
+
+\item{verbose}{Optional. Defaults to \code{FALSE}. A boolean value determining whether or not to print
+out the details of the image request.}
+
+\item{config}{Optional. Defaults to \code{list()}. A list of configuration parameters for image generation.}
+}
+\value{
+If successful, an image in \code{ggplot} format will be returned. If the API response indicates
+an error, the function halts execution and provides an error message.
+}
+\description{
+This function establishes a connection to a generative AI model through a generative AI object.
+It generates an image response based on the provided prompt.
+}
+\details{
+Providing accurate and valid information for each argument is crucial for successful image
+generation by the generative AI model. If any parameter is incorrect, the function responds with an
+error message based on the API feedback. To view all supported generative AI models, use the
+function \code{\link{available.models}}.
+
+This function is only available when using OpenAI's models.
+
+For \strong{OpenAI} models, available configurations are as follows. For more detail, please refer to
+\code{https://platform.openai.com/docs/api-reference/images/create}.
+
+\itemize{
+  \item \code{quality}
+
+  Optional. A character string. The quality of the image that will be generated. \code{hd} creates
+  images with finer details and greater consistency across the image.
+
+  \item \code{size}
+
+  Optional. A character string. The size of the generated images. Must be one of \code{256x256},
+  \code{512x512}, or \code{1024x1024} for \code{dall-e-2}. Must be one of \code{1024x1024}, \code{1792x1024}, or
+  \code{1024x1792} for \code{dall-e-3} models.
+
+  \item \code{style}
+
+  Optional. The style of the generated images. Must be one of \code{vivid} or \code{natural}. Vivid causes
+  the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce
+  more natural, less hyper-real looking images.
+
+  \item \code{user}
+
+  Optional. A character string. A unique identifier representing your end-user, which can help OpenAI to monitor
+  and detect abuse.
+}
+}
+\examples{
+\dontrun{
+# Assuming there is a GenAI object named 'genai.model' supporting this
+# function, please refer to the "Live Demo in Colab" above for real
+# examples. The following examples are just some basic guidelines.
+
+# Method 1 (recommended): use the pipe operator "\%>\%"
+generated.image = genai.model \%>\%
+  img(prompt = "A very cute panda eating bamboo.")
+generated.image
+
+# Method 2: use the reference operator "$"
+generated.image = genai.model$img(prompt = "A very cute sea otter on a rock.")
+generated.image
+
+# Method 3: use the function img() directly
+generated.image = img(genai.object = genai.model,
+                      prompt = "A very cute bear.")
+generated.image
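+
+# A hedged sketch of the 'config' argument (fields documented above;
+# the prompt and values are illustrative assumptions only)
+generated.image = genai.model \%>\%
+  img(prompt = "A lighthouse at dawn in watercolor style.",
+      config = list(size = "1024x1024",
+                    style = "natural"))
+generated.image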
+}
+
+}
+\seealso{
+\href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
+
+\href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/img.ipynb}{Live Demo in Colab}
+}
diff --git a/R/src/man/txt.Rd b/R/src/man/txt.Rd
new file mode 100644
index 0000000..585509b
--- /dev/null
+++ b/R/src/man/txt.Rd
@@ -0,0 +1,210 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/txt.R
+\name{txt}
+\alias{txt}
+\title{Text Generation with Text as the Input}
+\usage{
+txt(genai.object, prompt, verbose = FALSE, config = list())
+}
+\arguments{
+\item{genai.object}{A generative AI object containing necessary and correct information.}
+
+\item{prompt}{A character string representing the query for text generation.}
+
+\item{verbose}{Optional. Defaults to \code{FALSE}. A boolean value determining whether or not to print
+out the details of the text request.}
+
+\item{config}{Optional. Defaults to \code{list()}. A list of configuration parameters for text generation.}
+}
+\value{
+If successful, a text response will be returned. If the API response indicates
+an error, the function halts execution and provides an error message.
+}
+\description{
+This function establishes a connection to a generative AI model through a generative AI object.
+It generates a text response based on the provided prompt.
+}
+\details{
+Providing accurate and valid information for each argument is crucial for successful text
+generation by the generative AI model. If any parameter is incorrect, the function responds with an
+error message based on the API feedback. To view all supported generative AI models, use the
+function \code{\link{available.models}}.
+
+For \strong{Google Generative AI} models, available configurations are as follows. For more detail,
+please refer
+to \code{https://ai.google.dev/api/rest/v1/HarmCategory},
+\code{https://ai.google.dev/api/rest/v1/SafetySetting}, and
+\code{https://ai.google.dev/api/rest/v1/GenerationConfig}.
+
+\itemize{
+  \item \code{harm.category.dangerous.content}
+
+  Optional. An integer, from 1 to 5 inclusive, representing the threshold for dangerous content,
+  with a higher value representing a lower probability of being blocked.
+
+  \item \code{harm.category.harassment}
+
+  Optional. An integer, from 1 to 5 inclusive, representing the threshold for harassment content,
+  with a higher value representing a lower probability of being blocked.
+
+  \item \code{harm.category.hate.speech}
+
+  Optional. An integer, from 1 to 5 inclusive, representing the threshold for hate speech
+  content, with a higher value representing a lower probability of being blocked.
+
+  \item \code{harm.category.sexually.explicit}
+
+  Optional. An integer, from 1 to 5 inclusive, representing the threshold for sexually explicit
+  content, with a higher value representing a lower probability of being blocked.
+
+  \item \code{stop.sequences}
+
+  Optional. A list of character sequences (up to 5) that will stop output generation. If specified,
+  the API will stop at the first appearance of a stop sequence. The stop sequence will not be
+  included as part of the response.
+
+  \item \code{max.output.tokens}
+
+  Optional. An integer, value varies by model, representing the maximum number of tokens to include
+  in a candidate.
+
+  \item \code{temperature}
+
+  Optional. A number, from 0.0 to 1.0 inclusive, controlling the randomness of the output.
+
+  \item \code{top.p}
+
+  Optional. A number, value varies by model, representing the maximum cumulative probability of tokens
+  to consider when sampling.
+
+  \item \code{top.k}
+
+  Optional. A number, value varies by model, representing the maximum number of tokens to consider when sampling.
+}
+
+For \strong{Moonshot AI} models, available configurations are as follows. For more detail, please refer to
+\code{https://platform.moonshot.cn/api.html#chat-completion}.
+
+\itemize{
+  \item \code{max.tokens}
+
+  Optional. An integer. The maximum number of tokens that will be generated when the chat completes.
+  If the chat is not finished by the maximum number of tokens generated, the finish reason will be
+  "length", otherwise it will be "stop".
+
+  \item \code{temperature}
+
+  Optional. A number. What sampling temperature to use, between 0 and 1. Higher values (e.g. 0.7) will
+  make the output more random, while lower values (e.g. 0.2) will make it more focused and deterministic.
+
+  \item \code{top.p}
+
+  Optional. A number. An alternative to sampling with temperature, known as nucleus sampling.
+}
+
+For \strong{OpenAI} models, available configurations are as follows. For more detail, please refer to
+\code{https://platform.openai.com/docs/api-reference/chat/create}.
+
+\itemize{
+  \item \code{frequency.penalty}
+
+  Optional. A number from -2.0 to 2.0 inclusive. Positive values penalize new tokens based on their
+  existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+
+  \item \code{logit.bias}
+
+  Optional. A map. Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object
+  that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to
+  100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact
+  effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
+  values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
+
+  \item \code{logprobs}
+
+  Optional. A boolean value. Whether to return log probabilities of the output tokens or not. If true, returns the log
+  probabilities of each output token returned in the content of the message.
+
+  \item \code{top.logprobs}
+
+  Optional. An integer between 0 and 5 specifying the number of most likely tokens to return at each token
+  position, each with an associated log probability. \code{logprobs} must be set to \code{TRUE} if this
+  parameter is used.
+
+  \item \code{max.tokens}
+
+  Optional. An integer. The maximum number of tokens that can be generated in the chat completion. The total length of
+  input tokens and generated tokens is limited by the model's context length.
+
+  \item \code{presence.penalty}
+
+  Optional. A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
+  in the text so far, increasing the model's likelihood to talk about new topics.
+
+  \item \code{response.format}
+
+  Optional. An object specifying the format that the model must output. Compatible with GPT-4 Turbo and
+  all GPT-3.5 Turbo models newer than \code{gpt-3.5-turbo-1106}.
+
+  \item \code{seed}
+
+  Optional. An integer. If specified, our system will make a best effort to sample deterministically, such that repeated
+  requests with the same seed and parameters should return the same result.
+
+  \item \code{stop}
+
+  Optional. A character string or a list containing up to 4 sequences where the API will stop generating further tokens.
+
+  \item \code{temperature}
+
+  Optional. A number. What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
+  more random, while lower values like 0.2 will make it more focused and deterministic.
+
+  \item \code{top.p}
+
+  Optional. A number. An alternative to sampling with temperature, called nucleus sampling, where the model considers
+  the results of the tokens with \code{top.p} probability mass. So 0.1 means only the tokens comprising the top
+  10\% probability mass are considered.
+
+  \item \code{tools}
+
+  Optional. A list of tools the model may call. Currently, only functions are supported as a tool. Use this
+  to provide a list of functions the model may generate JSON inputs for.
+
+  \item \code{tool.choice}
+
+  Optional. A character string or object. Controls which (if any) function is called by the model. \code{none} means
+  the model will not call a function and instead generates a message. \code{auto} means the model can pick
+  between generating a message or calling a function.
+
+  \item \code{user}
+
+  Optional. A character string. A unique identifier representing your end-user, which can help OpenAI to monitor
+  and detect abuse.
+}
+}
+\examples{
+\dontrun{
+# Assuming there is a GenAI object named 'genai.model' supporting this
+# function, please refer to the "Live Demo in Colab" above for real
+# examples. The following examples are just some basic guidelines.
+
+# Method 1 (recommended): use the pipe operator "\%>\%"
+genai.model \%>\%
+  txt(prompt = "Write a story about Mars in 50 words.") \%>\%
+  cat()
+
+# Method 2: use the reference operator "$"
+cat(genai.model$txt(prompt = "Write a story about Jupiter in 50 words."))
+
+# Method 3: use the function txt() directly
+# Optionally set verbose = TRUE to see the request details
+cat(txt(genai.object = genai.model,
+        prompt = "Write a story about Earth in 50 words."))
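+
+# A hedged sketch of the 'config' argument for an OpenAI-backed object
+# (field names from the list above; the values are assumptions only)
+genai.model \%>\%
+  txt(prompt = "Write a haiku about autumn.",
+      config = list(temperature = 0.9,
+                    max.tokens = 50)) \%>\%
+  cat()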
It should be a link +starting with \code{https}/\code{http} or a local directory path to an image.} + +\item{verbose}{Optional. Default to \code{FALSE}. A boolean value determining whether or not to print +out the details of the text request.} + +\item{config}{Optional. Default to \code{list()}. A list of configuration parameters for text generation.} +} +\value{ +If successful, a text response will be returned. If the API response indicates +an error, the function halts execution and provides an error message. +} +\description{ +This function establishes a connection to a generative AI model through a generative AI object. +It generates a text response based on the provided prompt. +} +\details{ +Providing accurate and valid information for each argument is crucial for successful text +generation by the generative AI model. If any parameter is incorrect, the function responds with an +error message based on the API feedback. To view all supported generative AI models, use the +function \code{\link{available.models}}. + +For \strong{Google Generative AI} models, available configurations are as follows. For more detail, +please refer +to \code{https://ai.google.dev/api/rest/v1/HarmCategory}, +\code{https://ai.google.dev/api/rest/v1/SafetySetting}, and +\code{https://ai.google.dev/api/rest/v1/GenerationConfig}. + +\itemize{ + \item \code{harm.category.dangerous.content} + + Optional. An integer, from 1 to 5 inclusive, representing the threshold for dangerous content, + with a higher value representing a lower probability of being blocked. + + \item \code{harm.category.harassment} + + Optional. An integer, from 1 to 5 inclusive, representing the threshold for harasment content, + with a higher value representing a lower probability of being blocked. + + \item \code{harm.category.hate.speech} + + Optional. An integer, from 1 to 5 inclusive, representing the threshold for hate speech and + content, with a higher value representing a lower probability of being blocked. + + \item \code{harm.category.sexually.explicit} + + Optional. An integer, from 1 to 5 inclusive, representing the threshold for sexually explicit + content, with a higher value representing a lower probability of being blocked. + + \item \code{stop.sequences} + + Optional. A list of character sequences (up to 5) that will stop output generation. If specified, + the API will stop at the first appearance of a stop sequence. The stop sequence will not be + included as part of the response. + + \item \code{max.output.tokens} + + Optional. An integer, value varies by model, representing maximum number of tokens to include + in a candidate. + + \item \code{temperature} + + Optional. A number, from 0.0 to 1.0 inclusive, controlling the randomness of the output. + + \item \code{top.p} + + Optional. A number, value varies by model, representing maximum cumulative probability of tokens + to consider when sampling. + + \item \code{top.k} + + Optional. A number, value varies by model, representing maximum number of tokens to consider when sampling. +} + +For \strong{OpenAI} models, available configurations are as follows. For more detail, please refer to +\code{https://platform.openai.com/docs/api-reference/chat/create}. + +\itemize{ + \item \code{frequency.penalty} + + Optional. A number from -2.0 to 2.0 inclusive. Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + + \item \code{logit.bias} + + Optional. A map. 
+  that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to
+  100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact
+  effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
+  values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
+
+  \item \code{logprobs}
+
+  Optional. A boolean value. Whether to return log probabilities of the output tokens or not. If true, returns the log
+  probabilities of each output token returned in the content of the message.
+
+  \item \code{top.logprobs}
+
+  Optional. An integer between 0 and 5 specifying the number of most likely tokens to return at each token
+  position, each with an associated log probability. \code{logprobs} must be set to \code{TRUE} if this
+  parameter is used.
+
+  \item \code{max.tokens}
+
+  Optional. An integer. The maximum number of tokens that can be generated in the chat completion. The total length of
+  input tokens and generated tokens is limited by the model's context length.
+
+  \item \code{presence.penalty}
+
+  Optional. A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
+  in the text so far, increasing the model's likelihood to talk about new topics.
+
+  \item \code{response.format}
+
+  Optional. An object specifying the format that the model must output. Compatible with GPT-4 Turbo and
+  all GPT-3.5 Turbo models newer than \code{gpt-3.5-turbo-1106}.
+
+  \item \code{seed}
+
+  Optional. An integer. If specified, our system will make a best effort to sample deterministically, such that repeated
+  requests with the same seed and parameters should return the same result.
+
+  \item \code{stop}
+
+  Optional. A character string or a list containing up to 4 sequences where the API will stop generating further tokens.
+
+  \item \code{temperature}
+
+  Optional. A number. What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
+  more random, while lower values like 0.2 will make it more focused and deterministic.
+
+  \item \code{top.p}
+
+  Optional. A number. An alternative to sampling with temperature, called nucleus sampling, where the model considers
+  the results of the tokens with \code{top.p} probability mass. So 0.1 means only the tokens comprising the top
+  10\% probability mass are considered.
+
+  \item \code{tools}
+
+  Optional. A list of tools the model may call. Currently, only functions are supported as a tool. Use this
+  to provide a list of functions the model may generate JSON inputs for.
+
+  \item \code{tool.choice}
+
+  Optional. A character string or object. Controls which (if any) function is called by the model. \code{none} means
+  the model will not call a function and instead generates a message. \code{auto} means the model can pick
+  between generating a message or calling a function.
+
+  \item \code{user}
+
+  Optional. A character string. A unique identifier representing your end-user, which can help OpenAI to monitor
+  and detect abuse.
+}
+}
+\examples{
+\dontrun{
+# Assuming there is a GenAI object named 'genai.model' supporting this
+# function, an image in your current directory named 'example.png', and
+# an online image 'https://example.com/example.png/', please refer to
+# the "Live Demo in Colab" above for real examples. The following examples
+# are just some basic guidelines.
+
+# Method 1 (recommended): use the pipe operator "\%>\%"
+genai.model \%>\%
+  txt.image(prompt = "Please describe the following image.",
+            image.path = "https://example.com/example.png/") \%>\%
+  cat()
+
+# Method 2: use the reference operator "$"
+cat(genai.model$txt.image(prompt = "Please describe the following image.",
+                          image.path = "https://example.com/example.png/"))
+
+# Method 3: use the function txt.image() directly
+cat(txt.image(genai.object = genai.model,
+              prompt = "Please describe the following image.",
+              image.path = "example.png"))
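+
+# A hedged sketch of the 'config' argument for a Google-backed object
+# (field names from the list above; the values are assumptions only)
+genai.model \%>\%
+  txt.image(prompt = "Please describe the following image.",
+            image.path = "example.png",
+            config = list(temperature = 0.4,
+                          max.output.tokens = 100)) \%>\%
+  cat()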
+}
+
+}
+\seealso{
+\href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
+
+\href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/txt_image.ipynb}{Live Demo in Colab}
+}