-
Notifications
You must be signed in to change notification settings - Fork 10.1k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Add Falcon3 support and Fix issue #10875 #10883
Changes from 4 commits
d146334
fc05540
b3d022a
d8d2f37
92e41ec
a1f146d
64d8687
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -525,6 +525,13 @@ def get_vocab_base(self) -> tuple[list[str], list[int], str]: | |
else: | ||
token: str = reverse_vocab[i] | ||
if token in added_vocab: | ||
# We need to manually encode and decode the added tokens in case special characters | ||
# used for `\n` / `\t` have been manually added in the added tokens | ||
# To avoid unexpected issues - we make sure to encode single-char tokens | ||
if len(token) == 1: | ||
logger.info("Encode-Decode special characters using AutoTokenizer") | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I was thinking about comparing the token before and after the encoding, and printing the log only if there is a difference. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. That's a good idea. Done!
|
||
token = tokenizer.decode(tokenizer.encode(token, add_special_tokens=False)) | ||
|
||
if tokenizer.added_tokens_decoder[i].special or self.does_token_look_special(token): | ||
toktypes.append(gguf.TokenType.CONTROL) | ||
else: | ||
|
@@ -571,6 +578,9 @@ def get_vocab_base_pre(self, tokenizer) -> str: | |
if chkhsh == "8aeee3860c56296a157a1fe2fad249ec40aa59b1bb5709f4ade11c4e6fe652ed": | ||
# ref: https://huggingface.co/tiiuae/falcon-7b | ||
res = "falcon" | ||
if chkhsh == "9d032fcbd5501f4a38150912590928bfb36091efb5df11b8e2124b0390e3fb1e": | ||
# ref: https://huggingface.co/tiiuae/Falcon3-7B-Base | ||
res = "falcon3" | ||
if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f": | ||
# ref: https://huggingface.co/BAAI/bge-small-en-v1.5 | ||
res = "bert-bge" | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -1612,6 +1612,7 @@ enum llm_chat_template { | |
LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN, | ||
LLM_CHAT_TEMPLATE_MISTRAL_V7, | ||
LLM_CHAT_TEMPLATE_PHI_3, | ||
LLM_CHAT_TEMPLATE_FALCON_3, | ||
LLM_CHAT_TEMPLATE_ZEPHYR, | ||
LLM_CHAT_TEMPLATE_MONARCH, | ||
LLM_CHAT_TEMPLATE_GEMMA, | ||
|
@@ -1644,6 +1645,7 @@ static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = { | |
{ "mistral-v3-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN }, | ||
{ "mistral-v7", LLM_CHAT_TEMPLATE_MISTRAL_V7 }, | ||
{ "phi3", LLM_CHAT_TEMPLATE_PHI_3 }, | ||
{ "falcon3", LLM_CHAT_TEMPLATE_FALCON_3 }, | ||
{ "zephyr", LLM_CHAT_TEMPLATE_ZEPHYR }, | ||
{ "monarch", LLM_CHAT_TEMPLATE_MONARCH }, | ||
{ "gemma", LLM_CHAT_TEMPLATE_GEMMA }, | ||
|
@@ -6473,6 +6475,11 @@ static void llm_load_vocab( | |
} else if ( | ||
tokenizer_pre == "falcon") { | ||
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_FALCON; | ||
} else if ( | ||
tokenizer_pre == "falcon3") { | ||
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_LLAMA3; | ||
vocab.tokenizer_ignore_merges = true; | ||
vocab.tokenizer_add_bos = true; | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Let's simplify this by moving the check to:
tokenizer_pre == "llama3" ||
tokenizer_pre == "llama-v3" ||
tokenizer_pre == "llama-bpe" ||
tokenizer_pre == "falcon3") {
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Done. |
||
} else if ( | ||
tokenizer_pre == "mpt") { | ||
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MPT; | ||
|
@@ -22219,6 +22226,8 @@ static llm_chat_template llama_chat_detect_template(const std::string & tmpl) { | |
} | ||
} else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) { | ||
return LLM_CHAT_TEMPLATE_PHI_3; | ||
} else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) { | ||
return LLM_CHAT_TEMPLATE_FALCON_3; | ||
} else if (tmpl_contains("<|user|>") && tmpl_contains("<|endoftext|>")) { | ||
return LLM_CHAT_TEMPLATE_ZEPHYR; | ||
} else if (tmpl_contains("bos_token + message['role']")) { | ||
|
@@ -22371,6 +22380,15 @@ static int32_t llama_chat_apply_template_internal( | |
if (add_ass) { | ||
ss << "<|assistant|>\n"; | ||
} | ||
} else if (tmpl == LLM_CHAT_TEMPLATE_FALCON_3) { | ||
// Falcon 3 | ||
for (auto message : chat) { | ||
std::string role(message->role); | ||
ss << "<|" << role << "|>\n" << message->content << "\n"; | ||
} | ||
if (add_ass) { | ||
ss << "<|assistant|>\n"; | ||
} | ||
} else if (tmpl == LLM_CHAT_TEMPLATE_ZEPHYR) { | ||
// zephyr template | ||
for (auto message : chat) { | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I'm looking at the Falcon tokenizer and I don't see any added tokens that have
`\n`
or `\t`: https://huggingface.co/tiiuae/Falcon3-7B-Instruct/raw/main/tokenizer.json
For which tokens does this change make a difference?
Maybe also add some logs to know when this path is being triggered so we can spot any potential problems with other models.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Chiming in here! The added token is
`\n`
(`\t`
is the id 13); the only way to convert it properly to
`\n`
is to encode / decode using the tokenizer. There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I just added a log message inside the if statement.