rename a missed BLAS -> batch

LostRuins Concedo 2025-11-06 16:11:26 +08:00
parent 978d755ddc
commit cfb22b5c9d
3 changed files with 5 additions and 2 deletions


@@ -469,7 +469,7 @@ ifndef LLAMA_CUBLAS
 ifndef LLAMA_HIPBLAS
 ifndef LLAMA_VULKAN
 ifndef LLAMA_METAL
-NOTIFY_MSG = @echo -e '\n***\nYou did a basic CPU build. For faster speeds, consider installing and linking a GPU BLAS library. For example, set LLAMA_CLBLAST=1 LLAMA_VULKAN=1 to compile with Vulkan and CLBlast support. Add LLAMA_PORTABLE=1 to make a sharable build that other devices can use. Read the KoboldCpp Wiki for more information. This is just a reminder, not an error.\n***\n'
+NOTIFY_MSG = @echo -e '\n***\nYou did a basic CPU build. For faster speeds, consider installing and linking a GPU library. For example, set LLAMA_CLBLAST=1 LLAMA_VULKAN=1 to compile with Vulkan and CLBlast support. Add LLAMA_PORTABLE=1 to make a sharable build that other devices can use. Read the KoboldCpp Wiki for more information. This is just a reminder, not an error.\n***\n'
 endif
 endif
 endif
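For context, the notification's advice maps directly onto a make invocation; a minimal sketch using the flags named in the message itself (the -j parallelism flag is illustrative and not part of this commit):

    make LLAMA_CLBLAST=1 LLAMA_VULKAN=1 LLAMA_PORTABLE=1 -j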


@@ -3941,7 +3941,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs)
     //print progress
     if (!startedsampling && allow_regular_prints)
     {
-        printf("\rProcessing Prompt%s (%d / %zu tokens)", (blasmode ? " [BLAS]" : ""), input_consumed, embd_inp.size());
+        printf("\rProcessing Prompt%s (%d / %zu tokens)", (blasmode ? " [BATCH]" : ""), input_consumed, embd_inp.size());
     }
     fflush(stdout);
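With the rename applied, the progress line this printf produces in batch mode would read along these lines (token counts illustrative):

    Processing Prompt [BATCH] (128 / 512 tokens)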


@@ -6593,6 +6593,7 @@ def setuptunnel(global_memory, has_sd):
     if global_memory and global_memory["load_complete"]:
         print(f"Your remote Kobold API can be found at {tunneloutput}/api")
         print(f"Your remote OpenAI Compatible API can be found at {tunneloutput}/v1")
+        print(f"Your remote llama.cpp secondary WebUI at {tunneloutput}/lcpp/")
         if has_sd:
             print(f"StableUI is available at {tunneloutput}/sdui/")
         print("======\n")
@@ -7733,6 +7734,7 @@ def kcpp_main_process(launch_args, g_memory=None, gui_launcher=False):
     if not args.remotetunnel:
         print(f"Starting Kobold API on port {args.port} at {endpoint_url}/api/")
         print(f"Starting OpenAI Compatible API on port {args.port} at {endpoint_url}/v1/")
+        print(f"Starting llama.cpp secondary WebUI at {endpoint_url}/lcpp/")
         if args.sdmodel:
             print(f"StableUI is available at {endpoint_url}/sdui/")
     elif global_memory:
@@ -7742,6 +7744,7 @@ def kcpp_main_process(launch_args, g_memory=None, gui_launcher=False):
             remote_url = val
             print(f"Your remote Kobold API can be found at {endpoint_url}/api")
             print(f"Your remote OpenAI Compatible API can be found at {endpoint_url}/v1")
+            print(f"Starting llama.cpp secondary WebUI at {endpoint_url}/lcpp/")
             if args.sdmodel:
                 print(f"StableUI is available at {endpoint_url}/sdui/")
             global_memory["load_complete"] = True
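Once the server announces these endpoints, the OpenAI-compatible API under /v1 can be exercised directly; a minimal sketch, assuming a local instance on KoboldCpp's default port 5001 with a model already loaded (the URL, port, and payload values are illustrative, not part of this commit):

    import json
    import urllib.request

    # Hypothetical local endpoint; the server prints the /v1 base URL at startup
    url = "http://localhost:5001/v1/chat/completions"
    payload = {
        "messages": [{"role": "user", "content": "Hello!"}],
        "max_tokens": 64,
    }
    req = urllib.request.Request(
        url,
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        reply = json.loads(resp.read())
        print(reply["choices"][0]["message"]["content"])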