fix potential crash in autoguess

This commit is contained in:
Concedo 2025-02-09 12:33:28 +08:00
parent 3fa4843850
commit d22eca6c47

View file

@@ -88,12 +88,12 @@ static gpt_neox_model neox_ctx_v3;
 static mpt_model mpt_ctx_v3;
-static rwkv_v2_context * rwkv_ctx_v2;
-static rwkv_context * rwkv_ctx_v3;
-static llama_v2_context * llama_ctx_v2;
-static llama_v3_context * llama_ctx_v3;
-static llama_context * llama_ctx_v4;
+static rwkv_v2_context * rwkv_ctx_v2 = nullptr;
+static rwkv_context * rwkv_ctx_v3 = nullptr;
+static llama_v2_context * llama_ctx_v2 = nullptr;
+static llama_v3_context * llama_ctx_v3 = nullptr;
+static llama_context * llama_ctx_v4 = nullptr;
 static llama_context * draft_ctx = nullptr; //will remain null if speculative is unused
 static clip_ctx * clp_ctx = nullptr; //for llava
@@ -2659,6 +2659,10 @@ std::string gpttype_get_chat_template()
         printf("\nWarning: KCPP text generation not initialized!\n");
         return "";
     }
+    if(file_format!=FileFormat::GGUF_GENERIC || !llama_ctx_v4)
+    {
+        return "";
+    }
     // copied from examples/server/utils.hpp::llama_get_chat_template
     std::string template_key = "tokenizer.chat_template";
     // call with NULL buffer to get the total size of the string