diff --git a/DESCRIPTION b/DESCRIPTION
index 80ed1b7..21c48b9 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
Package: chattr
Title: Integrates LLM's with the RStudio IDE
-Version: 0.0.0.9005
+Version: 0.0.0.9006
Authors@R: c(
person("Edgar", "Ruiz", , "edgar@posit.co", role = c("aut", "cre")),
person(given = "Posit Software, PBC", role = c("cph", "fnd"))
diff --git a/R/backend-openai-core.R b/R/backend-openai-core.R
index 2d7962b..f66cc2a 100644
--- a/R/backend-openai-core.R
+++ b/R/backend-openai-core.R
@@ -11,7 +11,7 @@ openai_token <- function() {
if (is.null(ret)) {
abort("No token found
- Add your key to the \"OPENAI_API_KEY\" environment variable
- - or - Add \"open-ai-api-key\" to a `config` YAML file")
+ - or - Add \"openai-api-key\" to a `config` YAML file")
}
ret
@@ -47,7 +47,7 @@ openai_stream_ide <- function(defaults, req_body) {
} else {
if (!ui_current_console()) ide_paste_text("\n\n")
openai_request(defaults, req_body) %>%
- req_stream(
+ req_perform_stream(
function(x) {
openai_stream_ide_delta(x, defaults)
TRUE
@@ -115,7 +115,7 @@ openai_stream_file <- function(defaults,
ch_env$stream$response <- NULL
openai_request(defaults, req_body) %>%
- req_stream(
+ req_perform_stream(
function(x) {
openai_stream_file_delta(x, defaults, r_file_stream)
TRUE
diff --git a/R/chattr-defaults.R b/R/chattr-defaults.R
index 0f96977..66e6929 100644
--- a/R/chattr-defaults.R
+++ b/R/chattr-defaults.R
@@ -67,7 +67,7 @@ chattr_defaults <- function(type = "default",
if (!is.na(env_model)) {
check_files <- package_file("configs", path_ext_set(env_model, "yml"))
} else {
- check_files <- package_file("configs", "gpt35.yml")
+ check_files <- package_file("configs", "gpt4.yml")
}
if (file_exists(yaml_file)) {
diff --git a/R/chatter-use.R b/R/chattr-use.R
similarity index 98%
rename from R/chatter-use.R
rename to R/chattr-use.R
index c415b0d..6741d5b 100644
--- a/R/chatter-use.R
+++ b/R/chattr-use.R
@@ -12,7 +12,7 @@ chattr_use <- function(model_label = NULL) {
}
if (is.null(model_label)) {
- model_label <- "gpt35"
+ model_label <- "gpt4"
}
use_switch("configs", path_ext_set(model_label, "yml"))
diff --git a/README.md b/README.md
index b748011..1f0ae5f 100644
--- a/README.md
+++ b/README.md
@@ -73,7 +73,7 @@ back-end provides access to multiple LLM types:
href="https://platform.openai.com/docs/introduction">OpenAI
GPT Models accessible via the OpenAI’s
REST API. chattr provides a convenient way to interact with
-GPT 3.5, and DaVinci 3. |
+GPT 4, 3.5, and DaVinci 3. |
Interact
with OpenAI GPT models |
@@ -174,7 +174,7 @@ chattr(preview = TRUE)
#> ── Preview for: Console
#> • Provider: Open AI - Chat Completions
#> • Path/URL: https://api.openai.com/v1/chat/completions
-#> • Model: gpt-3.5-turbo
+#> • Model: gpt-4
#> • temperature: 0.01
#> • max_tokens: 1000
#> • stream: TRUE
diff --git a/README.qmd b/README.qmd
index 64058c5..29b2cde 100644
--- a/README.qmd
+++ b/README.qmd
@@ -73,7 +73,7 @@ provides access to multiple LLM types:
| Provider | Models | Setup Instructions |
|:-----------------:|:----------------------------------:|:-----------------:|
-| [OpenAI](https://platform.openai.com/docs/introduction) | GPT Models accessible via the OpenAI's REST API. `chattr` provides a convenient way to interact with GPT 3.5, and DaVinci 3. | [Interact with OpenAI GPT models](`r url`articles/openai-gpt.html) |
+| [OpenAI](https://platform.openai.com/docs/introduction) | GPT Models accessible via the OpenAI's REST API. `chattr` provides a convenient way to interact with GPT 4, 3.5, and DaVinci 3. | [Interact with OpenAI GPT models](`r url`articles/openai-gpt.html) |
| [LLamaGPT-Chat](https://github.com/kuvaus/LlamaGPTJ-chat) | LLM models available in your computer. Including GPT-J, LLaMA, and MPT. Tested on a [GPT4ALL](https://gpt4all.io/index.html) model. **LLamaGPT-Chat** is a command line chat program for models written in C++. | [Interact with local models](`r url`articles/backend-llamagpt.html) |
The idea is that as time goes by, more back-ends will be added.
diff --git a/inst/configs/gpt4.yml b/inst/configs/gpt4.yml
new file mode 100644
index 0000000..121056d
--- /dev/null
+++ b/inst/configs/gpt4.yml
@@ -0,0 +1,32 @@
+default:
+ prompt: |
+ {readLines(system.file('prompt/base.txt', package = 'chattr'))}
+ provider: Open AI - Chat Completions
+ path: https://api.openai.com/v1/chat/completions
+ model: gpt-4
+ max_data_files: 0
+ max_data_frames: 0
+ include_doc_contents: FALSE
+ include_history: TRUE
+ system_msg: You are a helpful coding assistant
+ model_arguments:
+ temperature: 0.01
+ max_tokens: 1000
+ stream: TRUE
+chat:
+ prompt: |
+ {readLines(system.file('prompt/base.txt', package = 'chattr'))}
+ For code output, use RMarkdown code chunks
+ Avoid all code chunk options
+console:
+ prompt: |
+ {readLines(system.file('prompt/base.txt', package = 'chattr'))}
+ For any line that is not code, prefix with a: #
+ Keep each line of explanations to no more than 80 characters
+ DO NOT use Markdown for the code
+script:
+ prompt: |
+ {readLines(system.file('prompt/base.txt', package = 'chattr'))}
+ For any line that is not code, prefix with a: #
+ Keep each line of explanations to no more than 80 characters
+ DO NOT use Markdown for the code
diff --git a/man/chattr_use.Rd b/man/chattr_use.Rd
index 0f64586..e735290 100644
--- a/man/chattr_use.Rd
+++ b/man/chattr_use.Rd
@@ -1,5 +1,5 @@
% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/chatter-use.R
+% Please edit documentation in R/chattr-use.R
\name{chattr_use}
\alias{chattr_use}
\title{Sets the LLM model to use in your session}
diff --git a/tests/testthat/_snaps/app_ui.md b/tests/testthat/_snaps/app_ui.md
index 78ee768..59f75a5 100644
--- a/tests/testthat/_snaps/app_ui.md
+++ b/tests/testthat/_snaps/app_ui.md
@@ -2,7 +2,7 @@
Code
chattr_use("gpt35")
- Message
+ Message
-- chattr
* Provider: Open AI - Chat Completions
diff --git a/tests/testthat/_snaps/backend-llamagpt.md b/tests/testthat/_snaps/backend-llamagpt.md
index 3f71f0c..389dbe7 100644
--- a/tests/testthat/_snaps/backend-llamagpt.md
+++ b/tests/testthat/_snaps/backend-llamagpt.md
@@ -2,7 +2,7 @@
Code
chattr_use("llamagpt")
- Message
+ Message
-- chattr
* Provider: LlamaGPT
@@ -13,7 +13,7 @@
Code
chattr_test(defaults = chattr_defaults())
- Message
+ Message
-- Testing chattr
* Provider: LlamaGPT
@@ -34,7 +34,7 @@
Code
ch_llamagpt_printout(chattr_defaults(), output = "xxx\n> ")
- Message
+ Message
-- chattr --
@@ -54,7 +54,7 @@
Code
chattr_use("gpt35")
- Message
+ Message
-- chattr
* Provider: Open AI - Chat Completions
diff --git a/tests/testthat/_snaps/backend-openai-core.md b/tests/testthat/_snaps/backend-openai-core.md
index 5625e64..b18edde 100644
--- a/tests/testthat/_snaps/backend-openai-core.md
+++ b/tests/testthat/_snaps/backend-openai-core.md
@@ -2,7 +2,7 @@
Code
openai_request(chattr_defaults(), list())
- Message
+ Message
POST https://api.openai.com/v1/chat/completions
Headers:
diff --git a/tests/testthat/_snaps/backend-openai.md b/tests/testthat/_snaps/backend-openai.md
index de377c5..bf6c2df 100644
--- a/tests/testthat/_snaps/backend-openai.md
+++ b/tests/testthat/_snaps/backend-openai.md
@@ -2,7 +2,7 @@
Code
chattr_use("gpt35")
- Message
+ Message
-- chattr
* Provider: Open AI - Chat Completions
@@ -13,7 +13,7 @@
Code
app_init_openai(chattr_defaults())
- Message
+ Message
* Provider: Open AI - Chat Completions
* Path/URL: https://api.openai.com/v1/chat/completions
* Model: gpt-3.5-turbo
@@ -26,7 +26,7 @@
Code
chattr_use("llamagpt")
- Message
+ Message
-- chattr
* Provider: LlamaGPT
@@ -37,7 +37,7 @@
Code
app_init_openai(chattr_defaults())
- Message
+ Message
* Provider: LlamaGPT
* Path/URL: ~/LlamaGPTJ-chat/build/bin/chat
* Model: ~/ggml-gpt4all-j-v1.3-groovy.bin
diff --git a/tests/testthat/_snaps/ch-defaults-save.md b/tests/testthat/_snaps/ch-defaults-save.md
index 1632cff..a50e717 100644
--- a/tests/testthat/_snaps/ch-defaults-save.md
+++ b/tests/testthat/_snaps/ch-defaults-save.md
@@ -2,7 +2,7 @@
Code
chattr_use("gpt35")
- Message
+ Message
-- chattr
* Provider: Open AI - Chat Completions
diff --git a/tests/testthat/_snaps/ch_defaults.md b/tests/testthat/_snaps/ch_defaults.md
index ae3ba30..1b4079b 100644
--- a/tests/testthat/_snaps/ch_defaults.md
+++ b/tests/testthat/_snaps/ch_defaults.md
@@ -2,7 +2,7 @@
Code
chattr_use("llamagpt")
- Message
+ Message
-- chattr
* Provider: LlamaGPT
@@ -13,7 +13,7 @@
Code
chattr_defaults()
- Message
+ Message
-- chattr ----------------------------------------------------------------------
diff --git a/tests/testthat/_snaps/chatter-use.md b/tests/testthat/_snaps/chatter-use.md
index d8f932a..b8005ab 100644
--- a/tests/testthat/_snaps/chatter-use.md
+++ b/tests/testthat/_snaps/chatter-use.md
@@ -2,22 +2,26 @@
Code
capture.output(ch_get_ymls())
- Message
+ Message
-- chattr - Available models
1: Open AI - Completions - text-davinci-003 (davinci)
2: Open AI - Chat Completions - gpt-3.5-turbo (gpt35)
- 3: LlamaGPT - ~/ggml-gpt4all-j-v1.3-groovy.bin (llamagpt)
+ 3: Open AI - Chat Completions - gpt-4 (gpt4)
+ 4: LlamaGPT - ~/ggml-gpt4all-j-v1.3-groovy.bin (llamagpt)
Output
- [1] "$davinci"
- [2] "[1] \"1: Open AI - Completions - text-davinci-003 (davinci) \\n\""
- [3] ""
- [4] "$gpt35"
- [5] "[1] \"2: Open AI - Chat Completions - gpt-3.5-turbo (gpt35) \\n\""
- [6] ""
- [7] "$llamagpt"
- [8] "[1] \"3: LlamaGPT - ~/ggml-gpt4all-j-v1.3-groovy.bin (llamagpt) \\n\""
- [9] ""
+ [1] "$davinci"
+ [2] "[1] \"1: Open AI - Completions - text-davinci-003 (davinci) \\n\""
+ [3] ""
+ [4] "$gpt35"
+ [5] "[1] \"2: Open AI - Chat Completions - gpt-3.5-turbo (gpt35) \\n\""
+ [6] ""
+ [7] "$gpt4"
+ [8] "[1] \"3: Open AI - Chat Completions - gpt-4 (gpt4) \\n\""
+ [9] ""
+ [10] "$llamagpt"
+ [11] "[1] \"4: LlamaGPT - ~/ggml-gpt4all-j-v1.3-groovy.bin (llamagpt) \\n\""
+ [12] ""
diff --git a/tests/testthat/_snaps/chattr-test.md b/tests/testthat/_snaps/chattr-test.md
index 98695b4..13bf28f 100644
--- a/tests/testthat/_snaps/chattr-test.md
+++ b/tests/testthat/_snaps/chattr-test.md
@@ -2,7 +2,7 @@
Code
chattr_use("gpt35")
- Message
+ Message
-- chattr
* Provider: Open AI - Chat Completions
@@ -13,7 +13,7 @@
Code
chattr_test()
- Message
+ Message
-- Testing chattr
* Provider: Open AI - Chat Completions
@@ -27,7 +27,7 @@
Code
chattr_use("llamagpt")
- Message
+ Message
-- chattr
* Provider: LlamaGPT
@@ -38,7 +38,7 @@
Code
chattr_test()
- Message
+ Message
-- Testing chattr
* Provider: LlamaGPT
diff --git a/tests/testthat/_snaps/chattr.md b/tests/testthat/_snaps/chattr.md
index c50b46b..659c402 100644
--- a/tests/testthat/_snaps/chattr.md
+++ b/tests/testthat/_snaps/chattr.md
@@ -2,7 +2,7 @@
Code
chattr_use("gpt35")
- Message
+ Message
-- chattr
* Provider: Open AI - Chat Completions
@@ -13,7 +13,7 @@
Code
chattr("test", preview = TRUE)
- Message
+ Message
-- chattr ----------------------------------------------------------------------
@@ -45,7 +45,7 @@
Code
chattr("test", preview = TRUE, prompt_build = FALSE)
- Message
+ Message
-- chattr ----------------------------------------------------------------------
@@ -64,7 +64,7 @@
Code
chattr("test", preview = TRUE, stream = FALSE)
- Message
+ Message
-- chattr ----------------------------------------------------------------------
@@ -96,7 +96,7 @@
Code
chattr(preview = TRUE)
- Message
+ Message
-- chattr ----------------------------------------------------------------------
@@ -164,7 +164,7 @@
Code
ch_use_openai_davinci()
- Message
+ Message
-- chattr
* Provider: Open AI - Completions
@@ -175,7 +175,7 @@
Code
chattr("test", preview = TRUE)
- Message
+ Message
-- chattr ----------------------------------------------------------------------
@@ -225,7 +225,7 @@
Code
chattr(preview = TRUE)
- Message
+ Message
-- chattr ----------------------------------------------------------------------
diff --git a/vignettes/openai-gpt.Rmd b/vignettes/openai-gpt.Rmd
index b952421..f1ff863 100644
--- a/vignettes/openai-gpt.Rmd
+++ b/vignettes/openai-gpt.Rmd
@@ -64,7 +64,7 @@ chattr_test()
## Change the model
-By default, `chattr` is setup to interact with GPT 3.5 (`gpt-3.5-turbo`). To
+By default, `chattr` is set up to interact with GPT 4 (`gpt-4`). To
switch to the DaVinci model (`text-davinci-003`) run:
```{r}
@@ -73,15 +73,15 @@ library(chattr)
chattr_use("davinci")
```
-To switch back to GPT 3.5, run:
+To switch back to GPT 4, run:
```{r}
-chattr_use("gpt35")
+chattr_use("gpt4")
```
If you wish to switch to a model other than the two mentioned above, you will
first need to note which of the two OpenAI endpoints you will need to reach:
-`completions` or `chat/completions`. "gpt35" points to the `chat/completions`
+`completions` or `chat/completions`. "gpt4" points to the `chat/completions`
endpoint, and "davinci" to the `completions` endpoint.
For example, if you wish to switch to the Curie model, first run: