From af407cc957b16c5a5e226a3f1c505a82239a505d Mon Sep 17 00:00:00 2001 From: Yagil Burowski Date: Tue, 26 Mar 2024 17:44:08 -0400 Subject: [PATCH 1/2] Add Stable Code Instruct 3B --- models/google-gemma-2b.json | 2 +- models/stable-code-instruct-3b.json | 42 +++++++++++++++++++++++++++++ schema.json | 2 +- 3 files changed, 44 insertions(+), 2 deletions(-) create mode 100644 models/stable-code-instruct-3b.json diff --git a/models/google-gemma-2b.json b/models/google-gemma-2b.json index 60ff91b..cd24d7f 100644 --- a/models/google-gemma-2b.json +++ b/models/google-gemma-2b.json @@ -2,7 +2,7 @@ "_descriptorVersion": "0.0.1", "datePublished": "2024-02-21T16:54:57.000Z", "name": "Google's Gemma 2B Instruct", - "description": "** Requires LM Studio 0.2.15 or newer ** Gemma is a family of lightweight LLMs built from the same research and technology Google used to create the Gemini models. Gemma models are available in two sizes, 2 billion and 7 billion parameters. These models are trained on up to 6T tokens of primarily English web documents, mathematics, and code, using a transformer architecture with enhancements like Multi-Query Attention, RoPE Embeddings, GeGLU Activations, and advanced normalization techniques.", + "description": "Gemma is a family of lightweight LLMs built from the same research and technology Google used to create the Gemini models. Gemma models are available in two sizes, 2 billion and 7 billion parameters. 
These models are trained on up to 6T tokens of primarily English web documents, mathematics, and code, using a transformer architecture with enhancements like Multi-Query Attention, RoPE Embeddings, GeGLU Activations, and advanced normalization techniques.", "author": { "name": "Google DeepMind", "url": "https://deepmind.google", diff --git a/models/stable-code-instruct-3b.json b/models/stable-code-instruct-3b.json new file mode 100644 index 0000000..79d8415 --- /dev/null +++ b/models/stable-code-instruct-3b.json @@ -0,0 +1,42 @@ +{ + "_descriptorVersion": "0.0.1", + "datePublished": "2024-03-20T00:31:49.000Z", + "name": "Stable Code Instruct 3B", + "description": "Stable Code Instruct 3B is a decoder-only language model with 2.7 billion parameters, developed from stable-code-3b. It has been trained on a combination of publicly available and synthetic datasets, with the latter generated through Direct Preference Optimization (DPO). This model has shown competitive performance in comparison to other models of similar size, as evidenced by its results on the MultiPL-E metrics across various programming languages using the BigCode Evaluation Harness, and on code-related tasks in MT Bench. It is fine-tuned for use in general code/software engineering conversations and SQL query generation and discussion.", + "author": { + "name": "Stability AI", + "url": "https://stability.ai/", + "blurb": "Stability AI is developing cutting-edge open AI models for Image, Language, Audio, Video, 3D and Biology." 
+ }, + "numParameters": "3B", + "resources": { + "canonicalUrl": "https://huggingface.co/stabilityai/stable-code-instruct-3b", + "downloadUrl": "https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF", + "paperUrl": "https://drive.google.com/file/d/16-DGsR5-qwoPztZ6HcM7KSRUxIXrjlSm/view" + }, + "trainedFor": "instruct", + "arch": "stablelm", + "files": { + "highlighted": { + "most_capable": { + "name": "stable-code-instruct-3b-Q8_0.gguf" + } + }, + "all": [ + { + "name": "stable-code-instruct-3b-Q8_0.gguf", + "url": "https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/resolve/main/stable-code-instruct-3b-Q8_0.gguf", + "sizeBytes": 2972926176, + "quantization": "Q8_0", + "format": "gguf", + "sha256checksum": "2ffc06aacad9b90fe633c3920d3784618d7419e5704151e9ab7087a5958a3c63", + "publisher": { + "name": "Bartowski", + "socialUrl": "https://huggingface.co/bartowski" + }, + "respository": "bartowski/stable-code-instruct-3b-GGUF", + "repositoryUrl": "https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF" + } + ] + } +} \ No newline at end of file diff --git a/schema.json b/schema.json index fcfc55f..05a8d1b 100644 --- a/schema.json +++ b/schema.json @@ -59,7 +59,7 @@ }, "arch": { "type": "string", - "enum": ["llama", "pythia", "gpt-neo-x", "gpt-j", "mpt", "replit", "starcoder", "falcon", "mistral", "stablelm", "phi2", "qwen2", "gemma"] + "enum": ["llama", "pythia", "gpt-neo-x", "gpt-j", "mpt", "replit", "starcoder", "falcon", "mistral", "stablelm", "phi2", "qwen2", "gemma", "command-r"] }, "description": { "type": "string" From 9aafbf89dc1a514802ef30d03932505e2e1040e3 Mon Sep 17 00:00:00 2001 From: Yagil Burowski Date: Tue, 26 Mar 2024 17:52:52 -0400 Subject: [PATCH 2/2] Add Hermes 2 Pro Mistral 7B --- models/Hermes-2-Pro-Mistral-7B.json | 41 +++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 models/Hermes-2-Pro-Mistral-7B.json diff --git a/models/Hermes-2-Pro-Mistral-7B.json b/models/Hermes-2-Pro-Mistral-7B.json 
new file mode 100644 index 0000000..c7a3c46 --- /dev/null +++ b/models/Hermes-2-Pro-Mistral-7B.json @@ -0,0 +1,41 @@ +{ + "_descriptorVersion": "0.0.1", + "datePublished": "2024-03-12T06:52:19.000Z", + "name": "Hermes 2 Pro Mistral 7B", + "description": "Hermes 2 Pro, an updated version of Nous Hermes 2, incorporates an enhanced and cleaned OpenHermes 2.5 Dataset alongside a new in-house developed dataset for Function Calling and JSON Mode. This version retains its robust performance in general tasks and conversations while showing notable improvements in Function Calling, JSON Structured Outputs, achieving a 90% score in function calling evaluation conducted with Fireworks.AI, and 84% in structured JSON Output evaluation. It introduces a special system prompt and a multi-turn function calling structure, incorporating a chatml role to streamline and simplify function calling.", + "author": { + "name": "NousResearch", + "url": "https://twitter.com/NousResearch", + "blurb": "We are dedicated to advancing the field of natural language processing, in collaboration with the open-source community, through bleeding-edge research and a commitment to symbiotic development." 
+ }, + "numParameters": "7B", + "resources": { + "canonicalUrl": "https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B", + "downloadUrl": "https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B-GGUF" + }, + "trainedFor": "chat", + "arch": "mistral", + "files": { + "highlighted": { + "economical": { + "name": "Hermes-2-Pro-Mistral-7B.Q4_0.gguf" + } + }, + "all": [ + { + "name": "Hermes-2-Pro-Mistral-7B.Q4_0.gguf", + "url": "https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B-GGUF/resolve/main/Hermes-2-Pro-Mistral-7B.Q4_0.gguf", + "sizeBytes": 4109098752, + "quantization": "q4_0", + "format": "gguf", + "sha256checksum": "f446c3125026f7af6757dd097dda02280adc85e908c058bd6f1c41a118354745", + "publisher": { + "name": "NousResearch", + "socialUrl": "https://twitter.com/NousResearch" + }, + "respository": "NousResearch/Hermes-2-Pro-Mistral-7B-GGUF", + "repositoryUrl": "https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B-GGUF" + } + ] + } +} \ No newline at end of file