From f9169667996d4753e4e32abb87dcd179f07efb08 Mon Sep 17 00:00:00 2001
From: Concedo <39025047+LostRuins@users.noreply.github.com>
Date: Wed, 11 Feb 2026 21:26:16 +0800
Subject: [PATCH] force mistral think tokens to print

---
 src/llama-vocab.cpp | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index a775db830..f9a48dd83 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -2704,6 +2704,13 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                 attr = LLAMA_TOKEN_ATTR_USER_DEFINED;
             }
+
+            if (t.first == "[THINK]" || t.first == "[/THINK]" || t.first == "<think>" || t.first == "</think>")
+            {
+                LLAMA_LOG_WARN("%s: setting token '%s' (%d) attribute to USER_DEFINED (%u), old attributes: %u\n",
+                    __func__, t.first.c_str(), t.second, LLAMA_TOKEN_ATTR_USER_DEFINED, attr);
+                attr = LLAMA_TOKEN_ATTR_USER_DEFINED;
+            }
         }
 
         // sanity checks
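
Note on why this works: in llama.cpp's detokenization path (token_to_piece), tokens whose attributes include CONTROL or UNKNOWN produce no output unless the caller explicitly requests special-token rendering, while USER_DEFINED tokens are copied to the output verbatim. Mistral's [THINK]/[/THINK] markers (and the angle-bracket <think>/</think> variants used by other reasoning models) are tagged as control tokens in the GGUF metadata, so they were silently dropped from generated text; reclassifying them as USER_DEFINED forces them to print. Below is a minimal, self-contained C++ sketch of that gate, not the actual llama.cpp implementation; the attribute bit values mirror the llama_token_attr enum in llama.h.

    #include <cstdio>
    #include <string>

    // Attribute bits, mirroring the llama_token_attr enum in llama.h.
    enum llama_token_attr : unsigned {
        LLAMA_TOKEN_ATTR_UNKNOWN      = 1 << 0,
        LLAMA_TOKEN_ATTR_CONTROL      = 1 << 3,
        LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4,
    };

    // Simplified model of the detokenizer gate: CONTROL/UNKNOWN tokens yield
    // no text unless special-token rendering is requested; USER_DEFINED
    // tokens are always emitted verbatim.
    static std::string token_to_piece(const std::string & text, unsigned attr, bool special) {
        const unsigned attr_special = LLAMA_TOKEN_ATTR_UNKNOWN | LLAMA_TOKEN_ATTR_CONTROL;
        if (!special && (attr & attr_special)) {
            return "";   // skipped: why [THINK] was invisible before this patch
        }
        return text;
    }

    int main() {
        // Before the patch: [THINK] is CONTROL, so it vanishes when special == false.
        std::printf("control:      '%s'\n", token_to_piece("[THINK]", LLAMA_TOKEN_ATTR_CONTROL, false).c_str());
        // After the patch: USER_DEFINED, so it prints regardless of the flag.
        std::printf("user_defined: '%s'\n", token_to_piece("[THINK]", LLAMA_TOKEN_ATTR_USER_DEFINED, false).c_str());
        return 0;
    }

Compiled and run, the sketch prints an empty string for the CONTROL case and '[THINK]' for the USER_DEFINED case, matching the before/after behavior of the patch.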