From d22eca6c47cad97034f05e36b87d85b638fcf303 Mon Sep 17 00:00:00 2001
From: Concedo <39025047+LostRuins@users.noreply.github.com>
Date: Sun, 9 Feb 2025 12:33:28 +0800
Subject: [PATCH] fix potential crash in autoguess

---
 gpttype_adapter.cpp | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/gpttype_adapter.cpp b/gpttype_adapter.cpp
index f565b6c26..8f9daddcb 100644
--- a/gpttype_adapter.cpp
+++ b/gpttype_adapter.cpp
@@ -88,12 +88,12 @@ static gpt_neox_model neox_ctx_v3;
 
 static mpt_model mpt_ctx_v3;
 
-static rwkv_v2_context * rwkv_ctx_v2;
-static rwkv_context * rwkv_ctx_v3;
+static rwkv_v2_context * rwkv_ctx_v2 = nullptr;
+static rwkv_context * rwkv_ctx_v3 = nullptr;
 
-static llama_v2_context * llama_ctx_v2;
-static llama_v3_context * llama_ctx_v3;
-static llama_context * llama_ctx_v4;
+static llama_v2_context * llama_ctx_v2 = nullptr;
+static llama_v3_context * llama_ctx_v3 = nullptr;
+static llama_context * llama_ctx_v4 = nullptr;
 static llama_context * draft_ctx = nullptr; //will remain null if speculative is unused
 
 static clip_ctx * clp_ctx = nullptr; //for llava
@@ -2659,6 +2659,10 @@ std::string gpttype_get_chat_template()
         printf("\nWarning: KCPP text generation not initialized!\n");
         return "";
     }
+    if(file_format!=FileFormat::GGUF_GENERIC || !llama_ctx_v4)
+    {
+        return "";
+    }
     // copied from examples/server/utils.hpp::llama_get_chat_template
     std::string template_key = "tokenizer.chat_template";
     // call with NULL buffer to get the total size of the string