From 7f0933e52db6d3923bbc9afbbd1fc974be48e2ba Mon Sep 17 00:00:00 2001
From: Edgar Ruiz <77294576+edgararuiz@users.noreply.github.com>
Date: Mon, 2 Sep 2024 09:23:30 -0500
Subject: [PATCH] Revert "Add support for openai-compatible models from 01AI,
 moonshot, Qwen,and GLM"

---
 inst/configs/glm4.yml       | 33 ---------------------------------
 inst/configs/moonshot8k.yml | 33 ---------------------------------
 inst/configs/qwen.yml       | 33 ---------------------------------
 inst/configs/yi.yml         | 33 ---------------------------------
 4 files changed, 132 deletions(-)
 delete mode 100644 inst/configs/glm4.yml
 delete mode 100644 inst/configs/moonshot8k.yml
 delete mode 100644 inst/configs/qwen.yml
 delete mode 100644 inst/configs/yi.yml

diff --git a/inst/configs/glm4.yml b/inst/configs/glm4.yml
deleted file mode 100644
index d5bafea..0000000
--- a/inst/configs/glm4.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-default:
-  prompt: |
-    {readLines(system.file('prompt/base.txt', package = 'chattr'))}
-  provider: OpenAI - Chat Completions
-  path: https://open.bigmodel.cn/api/paas/v4/chat/completions
-  label: glm-4-0520 (GLM)
-  model: glm-4-0520
-  max_data_files: 0
-  max_data_frames: 0
-  include_doc_contents: FALSE
-  include_history: TRUE
-  system_msg: You are a helpful coding assistant
-  model_arguments:
-    temperature: 0.03
-    max_tokens: 1000
-    stream: TRUE
-chat:
-  prompt: |
-    {readLines(system.file('prompt/base.txt', package = 'chattr'))}
-    For code output, use RMarkdown code chunks
-    Avoid all code chunk options
-console:
-  prompt: |
-    {readLines(system.file('prompt/base.txt', package = 'chattr'))}
-    For any line that is not code, prefix with a: #
-    Keep each line of explanations to no more than 80 characters
-    DO NOT use Markdown for the code
-script:
-  prompt: |
-    {readLines(system.file('prompt/base.txt', package = 'chattr'))}
-    For any line that is not code, prefix with a: #
-    Keep each line of explanations to no more than 80 characters
-    DO NOT use Markdown for the code
diff --git a/inst/configs/moonshot8k.yml b/inst/configs/moonshot8k.yml
deleted file mode 100644
index 486e9e6..0000000
--- a/inst/configs/moonshot8k.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-default:
-  prompt: |
-    {readLines(system.file('prompt/base.txt', package = 'chattr'))}
-  provider: OpenAI - Chat Completions
-  path: https://api.moonshot.cn/v1/chat/completions
-  label: moonshot-v1-8k (Moonshot AI)
-  model: moonshot-v1-8k
-  max_data_files: 0
-  max_data_frames: 0
-  include_doc_contents: FALSE
-  include_history: TRUE
-  system_msg: You are a helpful coding assistant
-  model_arguments:
-    temperature: 0.03
-    max_tokens: 1000
-    stream: TRUE
-chat:
-  prompt: |
-    {readLines(system.file('prompt/base.txt', package = 'chattr'))}
-    For code output, use RMarkdown code chunks
-    Avoid all code chunk options
-console:
-  prompt: |
-    {readLines(system.file('prompt/base.txt', package = 'chattr'))}
-    For any line that is not code, prefix with a: #
-    Keep each line of explanations to no more than 80 characters
-    DO NOT use Markdown for the code
-script:
-  prompt: |
-    {readLines(system.file('prompt/base.txt', package = 'chattr'))}
-    For any line that is not code, prefix with a: #
-    Keep each line of explanations to no more than 80 characters
-    DO NOT use Markdown for the code
diff --git a/inst/configs/qwen.yml b/inst/configs/qwen.yml
deleted file mode 100644
index 3b01f73..0000000
--- a/inst/configs/qwen.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-default:
-  prompt: |
-    {readLines(system.file('prompt/base.txt', package = 'chattr'))}
-  provider: OpenAI - Chat Completions
-  path: https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions
-  label: qwen-turbo (alibaba)
-  model: qwen-turbo
-  max_data_files: 0
-  max_data_frames: 0
-  include_doc_contents: FALSE
-  include_history: TRUE
-  system_msg: You are a helpful coding assistant
-  model_arguments:
-    temperature: 0.03
-    max_tokens: 1000
-    stream: TRUE
-chat:
-  prompt: |
-    {readLines(system.file('prompt/base.txt', package = 'chattr'))}
-    For code output, use RMarkdown code chunks
-    Avoid all code chunk options
-console:
-  prompt: |
-    {readLines(system.file('prompt/base.txt', package = 'chattr'))}
-    For any line that is not code, prefix with a: #
-    Keep each line of explanations to no more than 80 characters
-    DO NOT use Markdown for the code
-script:
-  prompt: |
-    {readLines(system.file('prompt/base.txt', package = 'chattr'))}
-    For any line that is not code, prefix with a: #
-    Keep each line of explanations to no more than 80 characters
-    DO NOT use Markdown for the code
diff --git a/inst/configs/yi.yml b/inst/configs/yi.yml
deleted file mode 100644
index 683dd96..0000000
--- a/inst/configs/yi.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-default:
-  prompt: |
-    {readLines(system.file('prompt/base.txt', package = 'chattr'))}
-  provider: OpenAI - Chat Completions
-  path: https://api.lingyiwanwu.com/v1/chat/completions
-  label: yi-34b-chat-0205 (01.AI)
-  model: yi-34b-chat-0205
-  max_data_files: 0
-  max_data_frames: 0
-  include_doc_contents: FALSE
-  include_history: TRUE
-  system_msg: You are a helpful coding assistant
-  model_arguments:
-    temperature: 0.03
-    max_tokens: 1000
-    stream: TRUE
-chat:
-  prompt: |
-    {readLines(system.file('prompt/base.txt', package = 'chattr'))}
-    For code output, use RMarkdown code chunks
-    Avoid all code chunk options
-console:
-  prompt: |
-    {readLines(system.file('prompt/base.txt', package = 'chattr'))}
-    For any line that is not code, prefix with a: #
-    Keep each line of explanations to no more than 80 characters
-    DO NOT use Markdown for the code
-script:
-  prompt: |
-    {readLines(system.file('prompt/base.txt', package = 'chattr'))}
-    For any line that is not code, prefix with a: #
-    Keep each line of explanations to no more than 80 characters
-    DO NOT use Markdown for the code
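
Note: each reverted file follows the same chattr config layout, with a "default" block naming the provider, endpoint path, and model, plus per-interface prompt overrides for chat, console, and script. A minimal sketch of how one of these configs would have been picked up from R, assuming the YAML file name maps to the chattr_use() label the same way the package's other shipped configs do:

    library(chattr)

    # Hypothetical usage: the "glm4" label only resolves to
    # inst/configs/glm4.yml in trees that still contain this file
    # (i.e., before this revert is applied).
    chattr_use("glm4")

    # Send a prompt to the configured OpenAI-compatible endpoint.
    chattr("Write an R function that computes a rolling mean")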