Fixed some functions to fail gracefully when no model is loaded

Concedo 2025-02-08 11:15:26 +08:00
parent b100bcb9e6
commit e68a3cf1dc
2 changed files with 12 additions and 1 deletion

@@ -2654,6 +2654,11 @@ bool gpttype_generate_abort()
 std::string gpttype_get_chat_template()
 {
+    if(kcpp_data==nullptr)
+    {
+        printf("\nWarning: KCPP text generation not initialized!\n");
+        return "";
+    }
     // copied from examples/server/utils.hpp::llama_get_chat_template
     std::string template_key = "tokenizer.chat_template";
     // call with NULL buffer to get the total size of the string
@@ -2690,6 +2695,12 @@ std::vector<int> gpttype_get_token_arr(const std::string & input, bool addbos)
 std::string gpttype_detokenize(const std::vector<int> & inputids, bool render_special)
 {
+    if(kcpp_data==nullptr)
+    {
+        printf("\nWarning: KCPP text generation not initialized!\n");
+        return "";
+    }
     std::string output = "";
     for (auto eid : inputids)
    {
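
For context on the hunk above, which stops at the "call with NULL buffer to get the total size of the string" comment: the template is read with a two-step size-then-fill pattern against the llama.cpp C API, the approach the comment credits to examples/server/utils.hpp::llama_get_chat_template. Below is a minimal sketch of that pattern, not the code of this commit; the helper name read_chat_template and the way the model pointer is obtained are assumptions for illustration.

    #include <string>
    #include <vector>
    #include "llama.h"  // assumed include path for the llama.cpp C API

    // Sketch: query the metadata value's length first, then read it into a buffer.
    static std::string read_chat_template(const struct llama_model * model)
    {
        const char * key = "tokenizer.chat_template";
        // First call with a NULL buffer: returns the value's length, or a negative value if the key is absent.
        int32_t len = llama_model_meta_val_str(model, key, NULL, 0);
        if (len < 0)
        {
            return ""; // model has no embedded chat template
        }
        // Second call with a buffer sized for the value plus a terminating NUL.
        std::vector<char> buf(len + 1, 0);
        llama_model_meta_val_str(model, key, buf.data(), buf.size());
        return std::string(buf.data());
    }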

@@ -5343,7 +5343,7 @@ def kcpp_main_process(launch_args, g_memory=None, gui_launcher=False):
             exitcounter = 999
             exit_with_error(3,"Could not load text model: " + modelname)
-        if (chatcompl_adapter is not None and isinstance(chatcompl_adapter, list) and not args.nomodel):
+        if (chatcompl_adapter is not None and isinstance(chatcompl_adapter, list) and not args.nomodel and args.model_param):
             # The chat completions adapter is a list that needs derivation from chat templates
             # Try to derive chat completions adapter from chat template, now that we have the model loaded
             ctbytes = handle.get_chat_template()