diff --git a/gpt4all-chat/CHANGELOG.md b/gpt4all-chat/CHANGELOG.md
index 2dd567792e0b..49c0734fd6ed 100644
--- a/gpt4all-chat/CHANGELOG.md
+++ b/gpt4all-chat/CHANGELOG.md
@@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 ## [Unreleased]
 
 ### Added
+- Automatically substitute chat templates that are not compatible with Jinja2Cpp in GGUFs ([#3327](https://github.com/nomic-ai/gpt4all/pull/3327))
 - Built-in javascript code interpreter tool plus model ([#3173](https://github.com/nomic-ai/gpt4all/pull/3173))
 
 ### Fixed
diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt
index 60d75a560f67..7947b2d4b91c 100644
--- a/gpt4all-chat/CMakeLists.txt
+++ b/gpt4all-chat/CMakeLists.txt
@@ -200,6 +200,7 @@ qt_add_executable(chat
     src/download.cpp src/download.h
     src/embllm.cpp src/embllm.h
     src/jinja_helpers.cpp src/jinja_helpers.h
+    src/jinja_replacements.cpp src/jinja_replacements.h
     src/llm.cpp src/llm.h
     src/localdocs.cpp src/localdocs.h
     src/localdocsmodel.cpp src/localdocsmodel.h
diff --git a/gpt4all-chat/metadata/models3.json b/gpt4all-chat/metadata/models3.json
index e93bdcfca56f..7a16a2556a0a 100644
--- a/gpt4all-chat/metadata/models3.json
+++ b/gpt4all-chat/metadata/models3.json
@@ -29,7 +29,8 @@
         "description": "",
         "url": "https://gpt4all.io/models/gguf/Meta-Llama-3-8B-Instruct.Q4_0.gguf",
         "promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2<|eot_id|>",
-        "systemPrompt": ""
+        "systemPrompt": "",
+        "chatTemplate": "{%- set loop_messages = messages %}\n{%- for message in loop_messages %}\n    {%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' %}\n    {{- content }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}"
     },
     {
         "order": "b",
@@ -113,7 +114,8 @@
         "description": "",
         "url": "https://huggingface.co/GPT4All-Community/Meta-Llama-3.1-8B-Instruct-128k/resolve/main/Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf",
         "promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2",
-        "systemPrompt": "<|start_header_id|>system<|end_header_id|>\nCutting Knowledge Date: December 2023\n\nYou are a helpful assistant.<|eot_id|>"
+        "systemPrompt": "<|start_header_id|>system<|end_header_id|>\nCutting Knowledge Date: December 2023\n\nYou are a helpful assistant.<|eot_id|>",
+        "chatTemplate": "{%- set loop_messages = messages %}\n{%- for message in loop_messages %}\n    {%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' %}\n    {%- if loop.index0 == 0 %}\n        {%- set content = bos_token + content %}\n    {%- endif %}\n    {{- content }}\n{%- endfor %}\n{{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}"
     },
     {
         "order": "g",
@@ -213,6 +215,7 @@
         "url": "https://huggingface.co/lamhieu/ghost-7b-v0.9.1-gguf/resolve/main/ghost-7b-v0.9.1-Q4_0.gguf",
         "promptTemplate": "<|user|>\n%1\n<|assistant|>\n%2\n",
         "systemPrompt": "<|system|>\nYou are Ghost created by Lam Hieu. You are a helpful and knowledgeable assistant. You like to help and always give honest information, in its original language. In communication, you are always respectful, equal and promote positive behavior.\n",
+        "chatTemplate": "{%- for message in messages %}\n    {%- if message['role'] == 'user' %}\n        {{- '<|user|>\\n' + message['content'] + eos_token }}\n    {%- elif message['role'] == 'system' %}\n        {{- '<|system|>\\n' + message['content'] + eos_token }}\n    {%- elif message['role'] == 'assistant' %}\n        {{- '<|assistant|>\\n' + message['content'] + eos_token }}\n    {%- endif %}\n    {%- if loop.last and add_generation_prompt %}\n        {{- '<|assistant|>' }}\n    {%- endif %}\n{%- endfor %}",
         "systemMessage": "You are Ghost created by Lam Hieu. You are a helpful and knowledgeable assistant. You like to help and always give honest information, in its original language. In communication, you are always respectful, equal and promote positive behavior."
     },
     {
@@ -298,7 +301,8 @@
         "description": "",
         "url": "https://gpt4all.io/models/gguf/Phi-3-mini-4k-instruct.Q4_0.gguf",
         "promptTemplate": "<|user|>\n%1<|end|>\n<|assistant|>\n%2<|end|>\n",
-        "systemPrompt": ""
+        "systemPrompt": "",
+        "chatTemplate": "{{- bos_token }}\n{%- for message in messages %}\n    {{- '<|' + message['role'] + '|>\\n' + message['content'] + '<|end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|assistant|>\\n' }}\n{%- else %}\n    {{- eos_token }}\n{%- endif %}"
     },
     {
         "order": "r",
@@ -476,6 +480,6 @@
         "url": "https://huggingface.co/Qwen/Qwen2-1.5B-Instruct-GGUF/resolve/main/qwen2-1_5b-instruct-q4_0.gguf",
         "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>",
         "systemPrompt": "<|im_start|>system\nBelow is an instruction that describes a task. Write a response that appropriately completes the request.<|im_end|>\n",
-        "chatTemplate": "{%- for message in messages %}\n    {%- if loop.first and messages[0]['role'] != 'system' %}\n        {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n    {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
+        "chatTemplate": "{%- for message in messages %}\n    {%- if loop.first and messages[0]['role'] != 'system' %}\n        {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n    {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
     }
 ]
diff --git a/gpt4all-chat/src/jinja_replacements.cpp b/gpt4all-chat/src/jinja_replacements.cpp
new file mode 100644
index 000000000000..c79a3ea77dbd
--- /dev/null
+++ b/gpt4all-chat/src/jinja_replacements.cpp
@@ -0,0 +1,480 @@
+// The map in this file is automatically generated by Jared. Do not hand-edit it.
+
+#include "jinja_replacements.h"
+
+// This is a list of prompt templates known to GPT4All and their associated replacements which are automatically used
+// instead when loading the chat template from GGUF. These exist for two primary reasons:
+// - HuggingFace model authors make ugly chat templates because they do not expect the end user to see them;
+// - and our Jinja2Cpp-based template parsing is not fully compatible with HuggingFace transformers and jinja2.
+
+// Below is a list of known incompatibilities with the official HF jinja2 implementation. These are not all necessarily
+// reflected in the below substitution list, and this cannot be an exhaustive list because there are a plethora of edge
+// cases in template parsing in which jinja2 and Jinja2Cpp differ. These are differences that could be reasonably
+// expected to affect chat templates that could be seen in the wild, or that cause a crash:
+// - Jinja2Cpp crashes (in debug builds) if given the template `a[""(`
+// - Jinja2Cpp does not support these jinja2 constructs:
+//   - `is not none`
+//   - list slicing, e.g. `messages[1:]`
+//   - the jinja2.ext.loopcontrols extension, which HF enables by default
+//   - a missing space after a quote in substitution (e.g. `{{ 'foo'}}`), which *has* been seen in the wild
+// - GPT4All does not currently support these HuggingFace template features:
+//   - customized "tojson" filter (we provide the built-in Jinja2Cpp one)
+//   - the AssistantTracker extension
+
+
+// The substitution list.
+// For templates that apply to models listed in models3.json, these should be copied there as well for best
+// compatibility with older versions of GPT4All.
+
+const std::unordered_map<std::string_view, std::string_view> CHAT_TEMPLATE_SUBSTITUTIONS {
+    // calme-2.1-phi3.5-4b.Q6_K.gguf (reported by ThilotE on Discord)
+    {
+        R"TEMPLATE({% for message in messages %}{% if message['role'] == 'system' and message['content'] %}{{'<|system|>
+' + message['content'] + '<|end|>
+'}}{% elif message['role'] == 'user' %}{{'<|user|>
+' + message['content'] + '<|end|>
+'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>
+' + message['content'] + '<|end|>
+'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>
+' }}{% else %}{{ eos_token }}{% endif %})TEMPLATE",
+        R"TEMPLATE({%- for message in messages %}
+    {%- if message['role'] == 'system' and message['content'] %}
+        {{- '<|system|>\n' + message['content'] + '<|end|>\n' }}
+    {%- elif message['role'] == 'user' %}
+        {{- '<|user|>\n' + message['content'] + '<|end|>\n' }}
+    {%- elif message['role'] == 'assistant' %}
+        {{- '<|assistant|>\n' + message['content'] + '<|end|>\n' }}
+    {%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<|assistant|>\n' }}
+{%- else %}
+    {{- eos_token }}
+{%- endif %})TEMPLATE",
+    },
+    // gemma-2-9b-it-Q4_0.gguf (nomic-ai/gpt4all#3282)
+    {
+        R"TEMPLATE({{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '
+' + message['content'] | trim + '<end_of_turn>
+' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model
+'}}{% endif %})TEMPLATE",
+        R"TEMPLATE({{- bos_token }}
+{%- if messages[0]['role'] == 'system' %}
+    {{- raise_exception('System role not supported') }}
+{%- endif %}
+{%- for message in messages %}
+    {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}
+        {{- raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}
+    {%- endif %}
+    {%- if message['role'] == 'assistant' %}
+        {%- set role = 'model' %}
+    {%- else %}
+        {%- set role = message['role'] %}
+    {%- endif %}
+    {{- '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<start_of_turn>model\n' }}
+{%- endif %})TEMPLATE",
+    },
+    // ghost-7b-v0.9.1-Q4_0.gguf
+    {
+        R"TEMPLATE({% for message in messages %}
+{% if message['role'] == 'user' %}
+{{ '<|user|>
+' + message['content'] + eos_token }}
+{% elif message['role'] == 'system' %}
+{{ '<|system|>
+' + message['content'] + eos_token }}
+{% elif message['role'] == 'assistant' %}
+{{ '<|assistant|>
+' + message['content'] + eos_token }}
+{% endif %}
+{% if loop.last and add_generation_prompt %}
+{{ '<|assistant|>' }}
+{% endif %}
+{% endfor %})TEMPLATE",
+        R"TEMPLATE({%- for message in messages %}
+    {%- if message['role'] == 'user' %}
+        {{- '<|user|>\n' + message['content'] + eos_token }}
+    {%- elif message['role'] == 'system' %}
+        {{- '<|system|>\n' + message['content'] + eos_token }}
+    {%- elif message['role'] == 'assistant' %}
+        {{- '<|assistant|>\n' + message['content'] + eos_token }}
+    {%- endif %}
+    {%- if loop.last and add_generation_prompt %}
+        {{- '<|assistant|>' }}
+    {%- endif %}
+{%- endfor %})TEMPLATE",
+    },
+    // Llama-3.2-1B-Instruct-Q4_0.gguf, Llama-3.2-3B-Instruct-Q4_0.gguf, SummLlama3.2-3B-Q4_0.gguf (nomic-ai/gpt4all#3309)
+    {
+        R"TEMPLATE({{- bos_token }}
+{%- if custom_tools is defined %}
+    {%- set tools = custom_tools %}
+{%- endif %}
+{%- if not tools_in_user_message is defined %}
+    {%- set tools_in_user_message = true %}
+{%- endif %}
+{%- if not date_string is defined %}
+    {%- if strftime_now is defined %}
+        {%- set date_string = strftime_now("%d %b %Y") %}
+    {%- else %}
+        {%- set date_string = "26 Jul 2024" %}
+    {%- endif %}
+{%- endif %}
+{%- if not tools is defined %}
+    {%- set tools = none %}
+{%- endif %}
+
+{#- This block extracts the system message, so we can slot it into the right place. #}
+{%- if messages[0]['role'] == 'system' %}
+    {%- set system_message = messages[0]['content']|trim %}
+    {%- set messages = messages[1:] %}
+{%- else %}
+    {%- set system_message = "" %}
+{%- endif %}
+
+{#- System message #}
+{{- "<|start_header_id|>system<|end_header_id|>\n\n" }}
+{%- if tools is not none %}
+    {{- "Environment: ipython\n" }}
+{%- endif %}
+{{- "Cutting Knowledge Date: December 2023\n" }}
+{{- "Today Date: " + date_string + "\n\n" }}
+{%- if tools is not none and not tools_in_user_message %}
+    {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }}
+    {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
+    {{- "Do not use variables.\n\n" }}
+    {%- for t in tools %}
+        {{- t | tojson(indent=4) }}
+        {{- "\n\n" }}
+    {%- endfor %}
+{%- endif %}
+{{- system_message }}
+{{- "<|eot_id|>" }}
+
+{#- Custom tools are passed in a user message with some extra guidance #}
+{%- if tools_in_user_message and not tools is none %}
+    {#- Extract the first user message so we can plug it in here #}
+    {%- if messages | length != 0 %}
+        {%- set first_user_message = messages[0]['content']|trim %}
+        {%- set messages = messages[1:] %}
+    {%- else %}
+        {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }}
+{%- endif %}
+    {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}}
+    {{- "Given the following functions, please respond with a JSON for a function call " }}
+    {{- "with its proper arguments that best answers the given prompt.\n\n" }}
+    {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
+    {{- "Do not use variables.\n\n" }}
+    {%- for t in tools %}
+        {{- t | tojson(indent=4) }}
+        {{- "\n\n" }}
+    {%- endfor %}
+    {{- first_user_message + "<|eot_id|>"}}
+{%- endif %}
+
+{%- for message in messages %}
+    {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}
+        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}
+    {%- elif 'tool_calls' in message %}
+        {%- if not message.tool_calls|length == 1 %}
+            {{- raise_exception("This model only supports single tool-calls at once!") }}
+        {%- endif %}
+        {%- set tool_call = message.tool_calls[0].function %}
+        {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
+        {{- '{"name": "' + tool_call.name + '", ' }}
+        {{- '"parameters": ' }}
+        {{- tool_call.arguments | tojson }}
+        {{- "}" }}
+        {{- "<|eot_id|>" }}
+    {%- elif message.role == "tool" or message.role == "ipython" %}
+        {{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }}
+        {%- if message.content is mapping or message.content is iterable %}
+            {{- message.content | tojson }}
+        {%- else %}
+            {{- message.content }}
+        {%- endif %}
+        {{- "<|eot_id|>" }}
+    {%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
+{%- endif %})TEMPLATE",
+        R"TEMPLATE({{- bos_token }}
+{%- if not date_string is defined %}
+    {%- if strftime_now is defined %}
+        {%- set date_string = strftime_now('%d %b %Y') %}
+    {%- else %}
+        {%- set date_string = '26 Jul 2024' %}
+    {%- endif %}
+{%- endif %}
+
+{#- This block extracts the system message, so we can slot it into the right place. #}
+{%- if messages[0]['role'] == 'system' %}
+    {%- set system_message = messages[0]['content'] | trim %}
+    {%- set loop_start = 1 %}
+{%- else %}
+    {%- set system_message = '' %}
+    {%- set loop_start = 0 %}
+{%- endif %}
+
+{#- System message #}
+{{- '<|start_header_id|>system<|end_header_id|>\n\n' }}
+{{- 'Cutting Knowledge Date: December 2023\n' }}
+{{- 'Today Date: ' + date_string + '\n\n' }}
+{{- system_message }}
+{{- '<|eot_id|>' }}
+
+{%- for message in messages %}
+    {%- if loop.index0 >= loop_start %}
+        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}
+    {%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
+{%- endif %})TEMPLATE",
+    },
+    // Llama-3.3-70B-Instruct-Q4_0.gguf (nomic-ai/gpt4all#3305)
+    {
+        R"TEMPLATE({{- bos_token }}
+{%- if custom_tools is defined %}
+    {%- set tools = custom_tools %}
+{%- endif %}
+{%- if not tools_in_user_message is defined %}
+    {%- set tools_in_user_message = true %}
+{%- endif %}
+{%- if not date_string is defined %}
+    {%- set date_string = "26 Jul 2024" %}
+{%- endif %}
+{%- if not tools is defined %}
+    {%- set tools = none %}
+{%- endif %}
+
+{#- This block extracts the system message, so we can slot it into the right place. #}
+{%- if messages[0]['role'] == 'system' %}
+    {%- set system_message = messages[0]['content']|trim %}
+    {%- set messages = messages[1:] %}
+{%- else %}
+    {%- set system_message = "" %}
+{%- endif %}
+
+{#- System message + builtin tools #}
+{{- "<|start_header_id|>system<|end_header_id|>\n\n" }}
+{%- if builtin_tools is defined or tools is not none %}
+    {{- "Environment: ipython\n" }}
+{%- endif %}
+{%- if builtin_tools is defined %}
+    {{- "Tools: " + builtin_tools | reject('equalto', 'code_interpreter') | join(", ") + "\n\n"}}
+{%- endif %}
+{{- "Cutting Knowledge Date: December 2023\n" }}
+{{- "Today Date: " + date_string + "\n\n" }}
+{%- if tools is not none and not tools_in_user_message %}
+    {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }}
+    {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
+    {{- "Do not use variables.\n\n" }}
+    {%- for t in tools %}
+        {{- t | tojson(indent=4) }}
+        {{- "\n\n" }}
+    {%- endfor %}
+{%- endif %}
+{{- system_message }}
+{{- "<|eot_id|>" }}
+
+{#- Custom tools are passed in a user message with some extra guidance #}
+{%- if tools_in_user_message and not tools is none %}
+    {#- Extract the first user message so we can plug it in here #}
+    {%- if messages | length != 0 %}
+        {%- set first_user_message = messages[0]['content']|trim %}
+        {%- set messages = messages[1:] %}
+    {%- else %}
+        {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }}
+{%- endif %}
+    {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}}
+    {{- "Given the following functions, please respond with a JSON for a function call " }}
+    {{- "with its proper arguments that best answers the given prompt.\n\n" }}
+    {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
+    {{- "Do not use variables.\n\n" }}
+    {%- for t in tools %}
+        {{- t | tojson(indent=4) }}
+        {{- "\n\n" }}
+    {%- endfor %}
+    {{- first_user_message + "<|eot_id|>"}}
+{%- endif %}
+
+{%- for message in messages %}
+    {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}
+        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}
+    {%- elif 'tool_calls' in message %}
+        {%- if not message.tool_calls|length == 1 %}
+            {{- raise_exception("This model only supports single tool-calls at once!") }}
+        {%- endif %}
+        {%- set tool_call = message.tool_calls[0].function %}
+        {%- if builtin_tools is defined and tool_call.name in builtin_tools %}
+            {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
+            {{- "<|python_tag|>" + tool_call.name + ".call(" }}
+            {%- for arg_name, arg_val in tool_call.arguments | items %}
+                {{- arg_name + '="' + arg_val + '"' }}
+                {%- if not loop.last %}
+                    {{- ", " }}
+                {%- endif %}
+            {%- endfor %}
+            {{- ")" }}
+        {%- else %}
+            {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
+            {{- '{"name": "' + tool_call.name + '", ' }}
+            {{- '"parameters": ' }}
+            {{- tool_call.arguments | tojson }}
+            {{- "}" }}
+        {%- endif %}
+        {%- if builtin_tools is defined %}
+            {#- This means we're in ipython mode #}
+            {{- "<|eom_id|>" }}
+        {%- else %}
+            {{- "<|eot_id|>" }}
+        {%- endif %}
+    {%- elif message.role == "tool" or message.role == "ipython" %}
+        {{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }}
+        {%- if message.content is mapping or message.content is iterable %}
+            {{- message.content | tojson }}
+        {%- else %}
+            {{- message.content }}
+        {%- endif %}
+        {{- "<|eot_id|>" }}
+    {%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
+{%- endif %})TEMPLATE",
+        R"TEMPLATE({{- bos_token }}
+{%- if not date_string is defined %}
+    {%- if strftime_now is defined %}
+        {%- set date_string = strftime_now("%d %b %Y") %}
+    {%- else %}
+        {%- set date_string = "26 Jul 2024" %}
+    {%- endif %}
+{%- endif %}
+
+{#- This block extracts the system message, so we can slot it into the right place. #}
+{%- if messages[0]['role'] == 'system' %}
+    {%- set system_message = messages[0]['content'] | trim %}
+    {%- set loop_start = 1 %}
+{%- else %}
+    {%- set system_message = "" %}
+    {%- set loop_start = 0 %}
+{%- endif %}
+
+{#- System message #}
+{{- "<|start_header_id|>system<|end_header_id|>\n\n" }}
+{{- "Cutting Knowledge Date: December 2023\n" }}
+{{- "Today Date: " + date_string + "\n\n" }}
+{{- system_message }}
+{{- "<|eot_id|>" }}
+
+{%- for message in messages %}
+    {%- if loop.index0 >= loop_start %}
+        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}
+    {%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
+{%- endif %})TEMPLATE",
+    },
+    // Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf
+    {
+        R"TEMPLATE({% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>
+
+'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>
+
+' }})TEMPLATE",
+        R"TEMPLATE({%- set loop_messages = messages %}
+{%- for message in loop_messages %}
+    {%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}
+    {%- if loop.index0 == 0 %}
+        {%- set content = bos_token + content %}
+    {%- endif %}
+    {{- content }}
+{%- endfor %}
+{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }})TEMPLATE",
+    },
+    // Meta-Llama-3-8B-Instruct.Q4_0.gguf
+    {
+        R"TEMPLATE({% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>
+
+'+ message['content'] | trim + '<|eot_id|>' %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>
+
+' }}{% endif %})TEMPLATE",
+        R"TEMPLATE({%- set loop_messages = messages %}
+{%- for message in loop_messages %}
+    {%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}
+    {{- content }}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
+{%- endif %})TEMPLATE",
+    },
+    // mistral-7b-openorca.gguf2.Q4_0.gguf, Hermes-3-Llama-3.2-3B.Q4_0.gguf
+    {
+        R"TEMPLATE({% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '
+' + message['content'] + '<|im_end|>' + '
+'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
+' }}{% endif %})TEMPLATE",
+        R"TEMPLATE({%- if not add_generation_prompt is defined %}
+    {%- set add_generation_prompt = false %}
+{%- endif %}
+{%- for message in messages %}
+    {{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<|im_start|>assistant\n' }}
+{%- endif %})TEMPLATE",
+    },
+    // Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf
+    {
+        R"TEMPLATE({% for message in messages %}{{'<|im_start|>' + message['role'] + '
+' + message['content'] + '<|im_end|>' + '
+'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
+' }}{% endif %})TEMPLATE",
+        R"TEMPLATE({%- for message in messages %}
+    {{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<|im_start|>assistant\n' }}
+{%- endif %})TEMPLATE",
+    },
+    // Phi-3-mini-4k-instruct.Q4_0.gguf
+    {
+        R"TEMPLATE({{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '
+' + message['content'] + '<|end|>
+' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>
+' }}{% else %}{{ eos_token }}{% endif %})TEMPLATE",
+        R"TEMPLATE({{- bos_token }}
+{%- for message in messages %}
+    {{- '<|' + message['role'] + '|>\n' + message['content'] + '<|end|>\n' }}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<|assistant|>\n' }}
+{%- else %}
+    {{- eos_token }}
+{%- endif %})TEMPLATE",
+    },
+    // qwen2-1_5b-instruct-q4_0.gguf, Qwen2-1.5B-Instruct.Q6_K.gguf (nomic-ai/gpt4all#3263), Qwen2-72B-Instruct.Q4_K_M.gguf
+    {
+        R"TEMPLATE({% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system
+You are a helpful assistant.<|im_end|>
+' }}{% endif %}{{'<|im_start|>' + message['role'] + '
+' + message['content'] + '<|im_end|>' + '
+'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
+' }}{% endif %})TEMPLATE",
+        R"TEMPLATE({%- for message in messages %}
+    {%- if loop.first and messages[0]['role'] != 'system' %}
+        {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
+    {%- endif %}
+    {{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>\n' }}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<|im_start|>assistant\n' }}
+{%- endif %})TEMPLATE",
+    },
+};
diff --git a/gpt4all-chat/src/jinja_replacements.h b/gpt4all-chat/src/jinja_replacements.h
new file mode 100644
index 000000000000..90f92cba9f34
--- /dev/null
+++ b/gpt4all-chat/src/jinja_replacements.h
@@ -0,0 +1,6 @@
+#pragma once
+
+#include <string_view>
+#include <unordered_map>
+
+extern const std::unordered_map<std::string_view, std::string_view> CHAT_TEMPLATE_SUBSTITUTIONS;
diff --git a/gpt4all-chat/src/modellist.cpp b/gpt4all-chat/src/modellist.cpp
index 93b22fb94d1e..63b00b35edae 100644
--- a/gpt4all-chat/src/modellist.cpp
+++ b/gpt4all-chat/src/modellist.cpp
@@ -1,6 +1,7 @@
 #include "modellist.h"
 
 #include "download.h"
+#include "jinja_replacements.h"
 #include "mysettings.h"
 #include "network.h"
 
@@ -352,7 +353,18 @@ QVariant ModelInfo::defaultChatTemplate() const
     auto path = (dirpath + filename()).toUtf8();
     auto res = LLModel::Implementation::chatTemplate(path.constData());
     if (res) {
-        m_modelChatTemplate = QString::fromStdString(*res);
+        std::string ggufTmpl(std::move(*res));
+        if (ggufTmpl.size() >= 2 && ggufTmpl.end()[-2] != '\n' && ggufTmpl.end()[-1] == '\n')
+            ggufTmpl.erase(ggufTmpl.end() - 1); // strip trailing newline for e.g. Llama-3.2-3B-Instruct
+        if (
+            auto replacement = CHAT_TEMPLATE_SUBSTITUTIONS.find(ggufTmpl);
+            replacement != CHAT_TEMPLATE_SUBSTITUTIONS.end()
+        ) {
+            qWarning() << "automatically substituting chat template for" << filename();
+            auto &[badTemplate, goodTemplate] = *replacement;
+            ggufTmpl = goodTemplate;
+        }
+        m_modelChatTemplate = QString::fromStdString(ggufTmpl);
     } else {
         qWarning().nospace() << "failed to get chat template for " << filename() << ": " << res.error().c_str();
         m_modelChatTemplate = QString(); // do not retry
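
Reviewer note (not part of the patch): the substitution in the modellist.cpp hunk is a plain exact-match lookup on the template string extracted from the GGUF, applied after stripping a single trailing newline. Below is a minimal standalone sketch of that flow; the names SUBS and normalizeAndSubstitute are hypothetical stand-ins for illustration (the real map is CHAT_TEMPLATE_SUBSTITUTIONS from jinja_replacements.h), and the single truncated map entry is a placeholder, not a real template pair.

// Standalone sketch, compilable with C++17; mirrors the logic of the
// ModelInfo::defaultChatTemplate() hunk above.
#include <iostream>
#include <string>
#include <string_view>
#include <unordered_map>

// Stand-in for CHAT_TEMPLATE_SUBSTITUTIONS; one truncated example pair.
static const std::unordered_map<std::string_view, std::string_view> SUBS {
    { "{% for message in messages %}...", "{%- for message in messages %}..." },
};

static std::string normalizeAndSubstitute(std::string tmpl)
{
    // Strip a single trailing newline, as the patch does for e.g. Llama-3.2-3B-Instruct,
    // so templates that differ only by that newline still hit the map.
    if (tmpl.size() >= 2 && tmpl.end()[-2] != '\n' && tmpl.end()[-1] == '\n')
        tmpl.erase(tmpl.end() - 1);
    // Exact-match lookup; templates not in the map pass through unchanged.
    if (auto it = SUBS.find(tmpl); it != SUBS.end())
        return std::string(it->second);
    return tmpl;
}

int main()
{
    // A GGUF-extracted template with a stray trailing newline still matches.
    std::cout << normalizeAndSubstitute("{% for message in messages %}...\n") << '\n';
}

Because the match is exact, even a one-character difference in a GGUF's embedded template (for example the missing space after a quote mentioned in the comments above) falls through to the ordinary Jinja2Cpp path, which is why the known-bad templates are stored verbatim.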