diff --git a/Makefile b/Makefile
index 9f912d812..b564da75d 100644
--- a/Makefile
+++ b/Makefile
@@ -79,6 +79,9 @@ SIMPLERCFLAGS =
 FULLCFLAGS =
 NONECFLAGS =
 
+# prefer bundled glslc
+LLAMA_USE_BUNDLED_GLSLC := 1
+
 CLBLAST_FLAGS = -DGGML_USE_CLBLAST
 FAILSAFE_FLAGS = -DUSE_FAILSAFE
 VULKAN_FLAGS = -DGGML_USE_VULKAN -DSD_USE_VULKAN
diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index 8afb425b1..c20188350 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -1082,7 +1082,14 @@ class TextModel(ModelBase):
         self.gguf_writer.add_token_list(tokens)
         self.gguf_writer.add_token_types(toktypes)
         special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
-        special_vocab.chat_template = "rwkv-world"
+        if special_vocab.chat_template is None:
+            template_path = Path(__file__).parent / "models" / "templates" / "llama-cpp-rwkv-world.jinja"
+            if template_path.is_file():
+                with open(template_path, "r", encoding="utf-8") as f:
+                    template = f.read()
+            else:
+                template = "rwkv-world"
+            special_vocab.chat_template = template
         # hack: Add '\n\n' as the EOT token to make it chat normally
         special_vocab._set_special_token("eot", 261)
         # hack: Override these as they have already been set (incorrectly)
diff --git a/models/templates/llama-cpp-rwkv-world.jinja b/models/templates/llama-cpp-rwkv-world.jinja
new file mode 100644
index 000000000..690223f1b
--- /dev/null
+++ b/models/templates/llama-cpp-rwkv-world.jinja
@@ -0,0 +1,34 @@
+{%- if not add_generation_prompt is defined -%}
+    {%- set add_generation_prompt = true -%}
+{%- endif -%}
+{%- set ns = namespace(system_prompt='') -%}
+{%- for message in messages -%}
+    {%- if message['role'] == 'system' -%}
+        {%- set ns.system_prompt = message['content'] -%}
+    {%- endif -%}
+{%- endfor -%}
+{{bos_token}}
+{%- if ns.system_prompt != '' -%}
+{{- 'System: ' + ns.system_prompt + '\n\n' -}}
+{%- endif -%}
+{%- for message in messages -%}
+    {%- if message['role'] == 'user' -%}
+        {{- 'User: ' + message['content']|trim + '\n\n' -}}
+    {%- endif -%}
+    {%- if message['role'] == 'assistant' and message['content'] is not none -%}
+        {%- set content = message['content'] -%}
+        {%- if '</think>' in content -%}
+            {%- set content = content.split('</think>')[-1] -%}
+        {%- endif -%}
+        {{- 'Assistant: ' + content|trim + '\n\n' -}}
+    {%- endif -%}
+{%- endfor -%}
+{%- if add_generation_prompt -%}
+    {{- 'Assistant:' -}}
+    {%- if enable_thinking is defined and enable_thinking is false %}
+        {{- ' <think>\n</think>' }}
+    {%- endif %}
+    {%- if enable_thinking is defined and enable_thinking is true %}
+        {{- ' <think>' }}
+    {%- endif %}
+{%- endif -%}
\ No newline at end of file
diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp
index cbc19d3c4..0e7e452ce 100644
--- a/src/llama-chat.cpp
+++ b/src/llama-chat.cpp
@@ -170,7 +170,7 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
         // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
         // EXAONE-3.0-7.8B-Instruct
         return LLM_CHAT_TEMPLATE_EXAONE_3;
-    } else if (tmpl_contains("rwkv-world")) {
+    } else if (tmpl_contains("rwkv-world") || tmpl_contains("{{- 'User: ' + message['content']|trim + '\\n\\n' -}}")) {
        return LLM_CHAT_TEMPLATE_RWKV_WORLD;
     } else if (tmpl_contains("<|start_of_role|>")) {
         return LLM_CHAT_TEMPLATE_GRANITE;
diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index f0d40375e..986dabc31 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -887,8 +887,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
                 if (std::regex pattern(tname); std::regex_search(tensor_name, pattern)) {
                     if (qtype != new_type) {
                         LLAMA_LOG_DEBUG("(overriding %s) ", ggml_type_name(new_type));
-                        new_type = qtype;
-                        break; // if two or more types are specified for the tensor, first match wins
+                        new_type = qtype; // if two or more types are specified for the same tensor, the last match wins
                     }
                 }
             }
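
For a quick preview of what the new template produces, here is a minimal sketch (not part of the patch) that renders `models/templates/llama-cpp-rwkv-world.jinja` with the stock `jinja2` package. llama.cpp renders templates with its bundled minja engine, so this is only an approximation; the path assumes the repo root, and the message contents are made up.

```python
# Sketch only: render the new RWKV "world" template with stock jinja2
# to eyeball the prompt layout llama.cpp will produce via minja.
import jinja2

with open("models/templates/llama-cpp-rwkv-world.jinja", encoding="utf-8") as f:
    template = jinja2.Environment(keep_trailing_newline=True).from_string(f.read())

print(template.render(
    bos_token="",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
        # the <think>...</think> block is stripped when history is re-serialized
        {"role": "assistant", "content": "<think>greeting</think>Hi, how can I help?"},
        {"role": "user", "content": "What is RWKV?"},
    ],
    add_generation_prompt=True,
    enable_thinking=True,  # appends " <think>" after the trailing "Assistant:"
))
# System: You are a helpful assistant.
#
# User: Hello!
#
# Assistant: Hi, how can I help?
#
# User: What is RWKV?
#
# Assistant: <think>
```

With `enable_thinking=False` the template instead appends `" <think>\n</think>"`, prefilling an empty reasoning block so the model skips straight to the answer.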
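The `llama-chat.cpp` hunk is needed because GGUFs converted with the new script embed the full Jinja source rather than the literal string `"rwkv-world"`, so detection falls back to a fingerprint substring unlikely to appear in other templates. A toy Python equivalent of the `tmpl_contains` checks (the function name is hypothetical):

```python
def looks_like_rwkv_world(tmpl: str) -> bool:
    """Toy equivalent of the C++ detection above; name is illustrative."""
    # "\\n\\n" in the C++ source is a literal backslash-n pair, i.e. the raw
    # template text, so the Python literal below matches the same characters.
    return ("rwkv-world" in tmpl
            or "{{- 'User: ' + message['content']|trim + '\\n\\n' -}}" in tmpl)
```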
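On the `llama-quant.cpp` change: dropping the `break` flips the tensor-type override resolution from first-match-wins to last-match-wins when several patterns match the same tensor. A toy re-implementation of the loop's semantics (the function name and example patterns are illustrative, not the real CLI parsing):

```python
import re

def resolve_override(tensor_name: str, overrides: list[tuple[str, str]], default: str) -> str:
    """Mirror of the C++ loop: every matching pattern is applied in order."""
    new_type = default
    for pattern, qtype in overrides:
        if re.search(pattern, tensor_name):
            new_type = qtype  # no break: a later match overrides an earlier one
    return new_type

# e.g. overrides collected from repeated --tensor-type flags, in CLI order
overrides = [("attn_.*", "q4_k"), ("attn_v", "q8_0")]
print(resolve_override("blk.0.attn_v.weight", overrides, "q4_0"))
# -> "q8_0"; with the old `break`, the first match ("attn_.*" -> "q4_k") won
```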