From 5e831551939d1b32559f479bf8024b8b4b4e8af3 Mon Sep 17 00:00:00 2001
From: Yagil Burowski
Date: Tue, 23 Jul 2024 10:28:52 -0400
Subject: [PATCH 1/2] Update catalog: phi-3

---
 models/phi-3.json | 41 +++++++++++++++++++++++++++++++++++++++++
 schema.json       |  4 ++--
 2 files changed, 43 insertions(+), 2 deletions(-)
 create mode 100644 models/phi-3.json

diff --git a/models/phi-3.json b/models/phi-3.json
new file mode 100644
index 0000000..19b01cf
--- /dev/null
+++ b/models/phi-3.json
@@ -0,0 +1,41 @@
+{
+  "_descriptorVersion": "0.0.1",
+  "datePublished": "2024-07-02T14:09:26",
+  "name": "Phi 3",
+  "description": "The Phi-3-Mini-4K-Instruct is a 3.8B parameters, lightweight, state-of-the-art open model trained with the Phi-3 datasets that includes both synthetic data and the filtered publicly available websites data with a focus on high-quality and reasoning dense properties.",
+  "author": {
+    "name": "Microsoft Research",
+    "url": "https://www.microsoft.com/en-us/research/",
+    "blurb": "Advancing science and technology to benefit humanity"
+  },
+  "numParameters": "3B",
+  "resources": {
+    "canonicalUrl": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct",
+    "downloadUrl": "https://huggingface.co/lmstudio-community/Phi-3.1-mini-4k-instruct-GGUF"
+  },
+  "trainedFor": "chat",
+  "arch": "phi3",
+  "files": {
+    "highlighted": {
+      "economical": {
+        "name": "Phi-3.1-mini-4k-instruct-Q5_K_M.gguf"
+      }
+    },
+    "all": [
+      {
+        "name": "Phi-3.1-mini-4k-instruct-Q5_K_M.gguf",
+        "url": "https://huggingface.co/lmstudio-community/Phi-3.1-mini-4k-instruct-GGUF/resolve/main/Phi-3.1-mini-4k-instruct-Q5_K_M.gguf",
+        "sizeBytes": 2815275232,
+        "quantization": "Q5_K_M",
+        "format": "gguf",
+        "sha256checksum": "bb076f8f9e6c188a8251c626e4d89442c291215c82b2cb06e1efed0941fc443a",
+        "publisher": {
+          "name": "LM Studio Community",
+          "socialUrl": "https://twitter.com/LMStudioAI"
+        },
+        "respository": "lmstudio-community/Phi-3.1-mini-4k-instruct-GGUF",
+        "repositoryUrl": "https://huggingface.co/lmstudio-community/Phi-3.1-mini-4k-instruct-GGUF"
+      }
+    ]
+  }
+}
diff --git a/schema.json b/schema.json
index 028b3b2..786fddc 100644
--- a/schema.json
+++ b/schema.json
@@ -51,7 +51,7 @@
     },
     "numParameters": {
       "type": "string",
-      "enum": ["1.5B", "2B", "3B", "4B", "6.7B", "7B", "13B", "15B", "30B", "65B", "unknown"]
+      "enum": ["1.5B", "2B", "3B", "4B", "6.7B", "7B", "8B", "13B", "15B", "30B", "65B", "unknown"]
     },
     "trainedFor": {
       "type": "string",
@@ -59,7 +59,7 @@
     },
     "arch": {
       "type": "string",
-      "enum": ["llama", "pythia", "gpt-neo-x", "gpt-j", "mpt", "replit", "starcoder", "falcon", "mistral", "stablelm", "phi2", "qwen2", "gemma", "command-r"]
+      "enum": ["llama", "pythia", "gpt-neo-x", "gpt-j", "mpt", "replit", "starcoder", "falcon", "mistral", "stablelm", "phi2", "qwen2", "gemma", "command-r", "phi3"]
     },
     "description": {
       "type": "string"

From e28d17a0d12d7865154046c85b58506d1de1192c Mon Sep 17 00:00:00 2001
From: Yagil Burowski
Date: Tue, 23 Jul 2024 10:36:42 -0400
Subject: [PATCH 2/2] Gemma 2 and Phi 3

---
 models/Llama-3-8B-Instruct.json |  2 +-
 models/gemma-2-9b.json          | 41 +++++++++++++++++++++++++++++++++
 models/phi-3.json               |  2 +-
 schema.json                     |  4 ++--
 4 files changed, 45 insertions(+), 4 deletions(-)
 create mode 100644 models/gemma-2-9b.json

diff --git a/models/Llama-3-8B-Instruct.json b/models/Llama-3-8B-Instruct.json
index 368f995..c9d63bf 100644
--- a/models/Llama-3-8B-Instruct.json
+++ b/models/Llama-3-8B-Instruct.json
@@ -31,7 +31,7 @@
         "format": "gguf",
         "sha256checksum": "ab9e4eec7e80892fd78f74d9a15d0299f1e22121cea44efd68a7a02a3fe9a1da",
         "publisher": {
-          "name": "LM Studio Community",
+          "name": "lmstudio-community",
           "socialUrl": "https://huggingface.co/lmstudio-community"
         },
         "respository": "lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF",
diff --git a/models/gemma-2-9b.json b/models/gemma-2-9b.json
new file mode 100644
index 0000000..425f5b6
--- /dev/null
+++ b/models/gemma-2-9b.json
@@ -0,0 +1,41 @@
+{
+  "_descriptorVersion": "0.0.1",
+  "datePublished": "2024-06-28T05:10:58.000Z",
+  "name": "Gemma 2 9B Instruct",
+  "description": "Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models",
+  "author": {
+    "name": "Google DeepMind",
+    "url": "https://deepmind.google",
+    "blurb": "We’re a team of scientists, engineers, ethicists and more, working to build the next generation of AI systems safely and responsibly."
+  },
+  "numParameters": "9B",
+  "resources": {
+    "canonicalUrl": "https://huggingface.co/google/gemma-2-9b-it",
+    "downloadUrl": "https://huggingface.co/lmstudio-community/gemma-2-9b-it-GGUF"
+  },
+  "trainedFor": "chat",
+  "arch": "gemma2",
+  "files": {
+    "highlighted": {
+      "economical": {
+        "name": "gemma-2-9b-it-Q4_K_M.gguf"
+      }
+    },
+    "all": [
+      {
+        "name": "gemma-2-9b-it-Q4_K_M.gguf",
+        "url": "https://huggingface.co/lmstudio-community/gemma-2-9b-it-GGUF/resolve/main/gemma-2-9b-it-Q4_K_M.gguf",
+        "sizeBytes": 5761057728,
+        "quantization": "Q4_K_M",
+        "format": "gguf",
+        "sha256checksum": "13b2a7b4115bbd0900162edcebe476da1ba1fc24e718e8b40d32f6e300f56dfe",
+        "publisher": {
+          "name": "lmstudio-community",
+          "socialUrl": "https://twitter.com/LMStudioAI"
+        },
+        "respository": "lmstudio-community/gemma-2-9b-it-GGUF",
+        "repositoryUrl": "https://huggingface.co/lmstudio-community/gemma-2-9b-it-GGUF"
+      }
+    ]
+  }
+}
diff --git a/models/phi-3.json b/models/phi-3.json
index 19b01cf..ab7adb7 100644
--- a/models/phi-3.json
+++ b/models/phi-3.json
@@ -30,7 +30,7 @@
         "format": "gguf",
         "sha256checksum": "bb076f8f9e6c188a8251c626e4d89442c291215c82b2cb06e1efed0941fc443a",
         "publisher": {
-          "name": "LM Studio Community",
+          "name": "lmstudio-community",
           "socialUrl": "https://twitter.com/LMStudioAI"
         },
         "respository": "lmstudio-community/Phi-3.1-mini-4k-instruct-GGUF",
diff --git a/schema.json b/schema.json
index 786fddc..dc0a668 100644
--- a/schema.json
+++ b/schema.json
@@ -51,7 +51,7 @@
     },
     "numParameters": {
      "type": "string",
-      "enum": ["1.5B", "2B", "3B", "4B", "6.7B", "7B", "8B", "13B", "15B", "30B", "65B", "unknown"]
+      "enum": ["1.5B", "2B", "3B", "4B", "6.7B", "7B", "8B", "9B", "13B", "15B", "30B", "65B", "unknown"]
     },
     "trainedFor": {
       "type": "string",
@@ -59,7 +59,7 @@
     },
     "arch": {
       "type": "string",
-      "enum": ["llama", "pythia", "gpt-neo-x", "gpt-j", "mpt", "replit", "starcoder", "falcon", "mistral", "stablelm", "phi2", "qwen2", "gemma", "command-r", "phi3"]
+      "enum": ["llama", "pythia", "gpt-neo-x", "gpt-j", "mpt", "replit", "starcoder", "falcon", "mistral", "stablelm", "phi2", "qwen2", "gemma", "gemma2", "command-r", "phi3"]
     },
     "description": {
       "type": "string"