diff --git a/gpttype_adapter.cpp b/gpttype_adapter.cpp
index a08690509..6fdd57d95 100644
--- a/gpttype_adapter.cpp
+++ b/gpttype_adapter.cpp
@@ -3860,7 +3860,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs)
     // For the record, the GLM4 one didn't break anyone and everyone forgot GLM4 needed this :D
     if (file_format == FileFormat::GGUF_GENERIC && (file_format_meta.model_architecture == llm_arch::LLM_ARCH_GEMMA4)) {
         std::string temp = gpttype_get_chat_template();
-        if (temp.find("<|channel>thought") != std::string::npos) {
+        if (temp.find("<|channel>thought\\n") != std::string::npos) {
            const std::string channel_open = "<|channel>";
            const std::string channel_close = "";
            const std::string channel_prefix = channel_open + channel_close;
diff --git a/kcpp_adapters/AutoGuess-NoThink.json b/kcpp_adapters/AutoGuess-NoThink.json
index ab85d9c7d..b6cdc3edb 100644
--- a/kcpp_adapters/AutoGuess-NoThink.json
+++ b/kcpp_adapters/AutoGuess-NoThink.json
@@ -77,7 +77,7 @@
         "assistant_end": "\n"
     }
 }, {
-    "search": ["<|turn>model","<|think|>","<|channel>thought"],
+    "search": ["<|turn>model","<|think|>","<|channel>thought\\n"],
     "name": "Google Gemma 4 (26B and 31B)",
     "adapter": {
         "system_start": "<|turn>system\n",
diff --git a/kcpp_adapters/AutoGuess.json b/kcpp_adapters/AutoGuess.json
index bad838d04..d9a96004b 100644
--- a/kcpp_adapters/AutoGuess.json
+++ b/kcpp_adapters/AutoGuess.json
@@ -77,7 +77,7 @@
         "assistant_end": "\n"
     }
 }, {
-    "search": ["<|turn>model","<|think|>","<|channel>thought"],
+    "search": ["<|turn>model","<|think|>","<|channel>thought\\n"],
     "name": "Google Gemma 4 (26B and 31B)",
     "adapter": {
         "system_start": "<|turn>system\n",