From e68a3cf1dc9586902ddbf30f62c50c5c5e383c05 Mon Sep 17 00:00:00 2001
From: Concedo <39025047+LostRuins@users.noreply.github.com>
Date: Sat, 8 Feb 2025 11:15:26 +0800
Subject: [PATCH] fixed some functions when no model is loaded

---
 gpttype_adapter.cpp | 11 +++++++++++
 koboldcpp.py        |  2 +-
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/gpttype_adapter.cpp b/gpttype_adapter.cpp
index 49f0afb60..f565b6c26 100644
--- a/gpttype_adapter.cpp
+++ b/gpttype_adapter.cpp
@@ -2654,6 +2654,11 @@ bool gpttype_generate_abort()
 
 std::string gpttype_get_chat_template()
 {
+    if(kcpp_data==nullptr)
+    {
+        printf("\nWarning: KCPP text generation not initialized!\n");
+        return "";
+    }
     // copied from examples/server/utils.hpp::llama_get_chat_template
     std::string template_key = "tokenizer.chat_template";
     // call with NULL buffer to get the total size of the string
@@ -2690,6 +2695,12 @@ std::vector<int> gpttype_get_token_arr(const std::string & input, bool addbos)
 
 std::string gpttype_detokenize(const std::vector<int> & inputids, bool render_special)
 {
+    if(kcpp_data==nullptr)
+    {
+        printf("\nWarning: KCPP text generation not initialized!\n");
+        return "";
+    }
+
     std::string output = "";
     for (auto eid : inputids)
     {
diff --git a/koboldcpp.py b/koboldcpp.py
index d52b584be..71b7bf78b 100644
--- a/koboldcpp.py
+++ b/koboldcpp.py
@@ -5343,7 +5343,7 @@ def kcpp_main_process(launch_args, g_memory=None, gui_launcher=False):
             exitcounter = 999
             exit_with_error(3,"Could not load text model: " + modelname)
 
-    if (chatcompl_adapter is not None and isinstance(chatcompl_adapter, list) and not args.nomodel):
+    if (chatcompl_adapter is not None and isinstance(chatcompl_adapter, list) and not args.nomodel and args.model_param):
        # The chat completions adapter is a list that needs derivation from chat templates
        # Try to derive chat completions adapter from chat template, now that we have the model loaded
        ctbytes = handle.get_chat_template()