From a29c197de2bd9ea64d4f893433ab833cf99d9a9f Mon Sep 17 00:00:00 2001
From: Colin Kealty
Date: Sun, 19 May 2024 11:09:15 -0400
Subject: [PATCH] Add Smaug support

---
 convert-hf-to-gguf-update.py | 1 +
 convert-hf-to-gguf.py        | 3 +++
 llama.cpp                    | 4 ++++
 llama.h                      | 1 +
 4 files changed, 9 insertions(+)

diff --git a/convert-hf-to-gguf-update.py b/convert-hf-to-gguf-update.py
index 45404b32b75ae..1e3ad09e2f188 100755
--- a/convert-hf-to-gguf-update.py
+++ b/convert-hf-to-gguf-update.py
@@ -81,6 +81,7 @@ class TOKENIZER_TYPE(IntEnum):
     {"name": "jina-v2-en", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-en", }, # WPM!
     {"name": "jina-v2-es", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", },
     {"name": "jina-v2-de", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", },
+    {"name": "smaug-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct", },
 ]
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index bd303150ae6b9..1702fbc8d1edb 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -473,6 +473,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         if chkhsh == "27949a2493fc4a9f53f5b9b029c82689cfbe5d3a1929bb25e043089e28466de6":
             # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de
             res = "jina-v2-de"
+        if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d":
+            # ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct
+            res = "smaug-bpe"
 
         if res is None:
             logger.warning("\n")
diff --git a/llama.cpp b/llama.cpp
index 06ff4da617f5d..c2d37b4360bd9 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4472,6 +4472,9 @@ static void llm_load_vocab(
             } else if (
                     tokenizer_pre == "dbrx") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX;
+            } else if (
+                    tokenizer_pre == "smaug-bpe") {
+                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMAUG;
             } else {
                 throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
             }
@@ -12310,6 +12313,7 @@ struct llm_tokenizer_bpe {
                     });
                 break;
             case LLAMA_VOCAB_PRE_TYPE_DBRX:
+            case LLAMA_VOCAB_PRE_TYPE_SMAUG:
                 word_collection = unicode_regex_split(text, {
                     // same as llama3
                     "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
diff --git a/llama.h b/llama.h
index b7bf2afcb403e..9e4115ed2e93a 100644
--- a/llama.h
+++ b/llama.h
@@ -85,6 +85,7 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_QWEN2          = 11,
         LLAMA_VOCAB_PRE_TYPE_OLMO           = 12,
         LLAMA_VOCAB_PRE_TYPE_DBRX           = 13,
+        LLAMA_VOCAB_PRE_TYPE_SMAUG          = 14,
     };
 
     // note: these values should be synchronized with ggml_rope
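
For context, a minimal sketch of how the chkhsh value checked in get_vocab_base_pre is derived: convert-hf-to-gguf-update.py hashes the token ids that each tokenizer produces for a fixed probe string. The probe text and local path below are illustrative placeholders, not the script's actual values; only the hashing scheme is intended to match.

# Hedged sketch of the hashing scheme behind the "c136ed14..." check above.
# Assumptions: the tokenizer files are available locally and `transformers`
# is installed; model_dir and probe_text are placeholders, not the script's
# real probe string or download path.
from hashlib import sha256
from transformers import AutoTokenizer

def vocab_hash(model_dir: str, probe_text: str) -> str:
    tokenizer = AutoTokenizer.from_pretrained(model_dir)
    token_ids = tokenizer.encode(probe_text)
    # A tokenizer that pre-tokenizes text differently yields different ids,
    # hence a different hash, which is why Smaug needs its own "smaug-bpe" entry.
    return sha256(str(token_ids).encode()).hexdigest()

# Hypothetical usage:
# print(vocab_hash("models/tokenizers/smaug-bpe", "Hello 12345 world!"))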
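
Similarly, a small sketch of what the llama3-style pre-tokenizer pattern routed to LLAMA_VOCAB_PRE_TYPE_SMAUG does, using Python's third-party regex package as a stand-in for unicode_regex_split (an assumption made purely for illustration):

# Hedged sketch: splits text with the same llama3-style pattern that the patch
# routes LLAMA_VOCAB_PRE_TYPE_SMAUG to; the `regex` package is used here only
# because, like the C++ side, it supports \p{...} Unicode property classes.
import regex

PRE_TOKENIZE_PATTERN = (
    r"(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])"
    r"|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}"
    r"| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"
)

def pre_tokenize(text: str) -> list[str]:
    # Return the chunks that BPE merges are applied to, one per regex match.
    return regex.findall(PRE_TOKENIZE_PATTERN, text)

print(pre_tokenize("Hello world, it's 2024!"))
# ['Hello', ' world', ',', ' it', "'s", ' ', '202', '4', '!']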