Mirror of https://github.com/LostRuins/koboldcpp.git
Add additional debug info and increased ctx sizes, fixed a bug loading vulkan config
Commit 0871c7cbd1 (parent b56805a2ba)
2 changed files with 12 additions and 4 deletions
@@ -1876,6 +1876,14 @@ generation_outputs gpttype_generate(const generation_inputs inputs)
     std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
     n_past = 0;
 
+    if (debugmode==1)
+    {
+        std::string outstr = "";
+        printf("\n\n[Debug: Dump Raw Input Tokens, format: %d]\n", file_format);
+        outstr += get_tok_vec_str(embd_inp);
+        printf("%s\n", RemoveBell(outstr).c_str());
+    }
+
     bool is_mamba = (file_format == FileFormat::GGUF_GENERIC && file_format_meta.model_architecture==GGUFArch::ARCH_MAMBA);
 
     if (file_format == FileFormat::RWKV_1 || file_format==FileFormat::RWKV_2 || is_mamba)
@@ -2011,7 +2019,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs)
     if (debugmode==1)
     {
         std::string outstr = "";
-        printf("\n[Debug: Dump Input Tokens, format: %d]\n", file_format);
+        printf("\n[Debug: Dump Forwarded Input Tokens, format: %d]\n", file_format);
         outstr += get_tok_vec_str(embd_inp);
         outstr += "\n\n[Debug: n_past="+std::to_string(n_past)+" Context Size = " + std::to_string(current_context_tokens.size()) + "]\n";
         outstr += get_tok_vec_str(current_context_tokens);
@@ -1607,7 +1607,7 @@ def show_new_gui():
     # slider data
     blasbatchsize_values = ["-1", "32", "64", "128", "256", "512", "1024", "2048"]
     blasbatchsize_text = ["Don't Batch BLAS","32","64","128","256","512","1024","2048"]
-    contextsize_text = ["256", "512", "1024", "2048", "3072", "4096", "6144", "8192", "12288", "16384", "24576", "32768", "49152", "65536"]
+    contextsize_text = ["256", "512", "1024", "2048", "3072", "4096", "6144", "8192", "12288", "16384", "24576", "32768", "49152", "65536", "98304", "131072"]
     runopts = [opt for lib, opt in lib_option_pairs if file_exists(lib)]
     antirunopts = [opt.replace("Use ", "") for lib, opt in lib_option_pairs if not (opt in runopts)]
 
@@ -2290,7 +2290,7 @@ def show_new_gui():
             if str(g) in dict["usecublas"]:
                 gpu_choice_var.set(str(g+1))
                 break
-    elif "usevulkan" in dict:
+    elif "usevulkan" in dict and dict['usevulkan'] is not None:
         if "noavx2" in dict and dict["noavx2"]:
             if vulkan_noavx2_option is not None:
                 runopts_var.set(vulkan_noavx2_option)
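The guard added in this hunk matters when a saved config contains the "usevulkan" key with a null value: the old test, which only checked membership, still took the Vulkan branch and selected a Vulkan run option even though Vulkan was never actually configured. Below is a minimal sketch of the pattern; the config keys and backend names are illustrative assumptions, not koboldcpp's actual config-loading code.

# Minimal sketch of the None guard (keys and backend names are
# illustrative assumptions, not the actual koboldcpp loader logic).
def pick_backend(cfg: dict) -> str:
    if "usecublas" in cfg and cfg["usecublas"] is not None:
        return "CuBLAS"
    # Without the extra "is not None" check, a config that stores
    # "usevulkan": None would still take this branch and pick Vulkan.
    elif "usevulkan" in cfg and cfg["usevulkan"] is not None:
        return "Vulkan"
    return "OpenBLAS"

print(pick_backend({"usevulkan": None}))  # OpenBLAS with the guard, Vulkan without it
print(pick_backend({"usevulkan": [0]}))   # Vulkan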
@@ -3260,7 +3260,7 @@ if __name__ == '__main__':
     compatgroup.add_argument("--noblas", help="Do not use OpenBLAS for accelerated prompt ingestion", action='store_true')
     parser.add_argument("--gpulayers", help="Set number of layers to offload to GPU when using GPU. Requires GPU.",metavar=('[GPU layers]'), nargs='?', const=1, type=int, default=0)
     parser.add_argument("--tensor_split", help="For CUDA and Vulkan only, ratio to split tensors across multiple GPUs, space-separated list of proportions, e.g. 7 3", metavar=('[Ratios]'), type=float, nargs='+')
-    parser.add_argument("--contextsize", help="Controls the memory allocated for maximum context size, only change if you need more RAM for big contexts. (default 2048). Supported values are [256,512,1024,2048,3072,4096,6144,8192,12288,16384,24576,32768,49152,65536]. IF YOU USE ANYTHING ELSE YOU ARE ON YOUR OWN.",metavar=('[256,512,1024,2048,3072,4096,6144,8192,12288,16384,24576,32768,49152,65536]'), type=check_range(int,256,262144), default=2048)
+    parser.add_argument("--contextsize", help="Controls the memory allocated for maximum context size, only change if you need more RAM for big contexts. (default 2048). Supported values are [256,512,1024,2048,3072,4096,6144,8192,12288,16384,24576,32768,49152,65536,98304,131072]. IF YOU USE ANYTHING ELSE YOU ARE ON YOUR OWN.",metavar=('[256,512,1024,2048,3072,4096,6144,8192,12288,16384,24576,32768,49152,65536,98304,131072]'), type=check_range(int,256,262144), default=2048)
     parser.add_argument("--ropeconfig", help="If set, uses customized RoPE scaling from configured frequency scale and frequency base (e.g. --ropeconfig 0.25 10000). Otherwise, uses NTK-Aware scaling set automatically based on context size. For linear rope, simply set the freq-scale and ignore the freq-base",metavar=('[rope-freq-scale]', '[rope-freq-base]'), default=[0.0, 10000.0], type=float, nargs='+')
     #more advanced params
     parser.add_argument("--blasbatchsize", help="Sets the batch size used in BLAS processing (default 512). Setting it to -1 disables BLAS mode, but keeps other benefits like GPU offload.", type=int,choices=[-1,32,64,128,256,512,1024,2048], default=512)
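Because --contextsize is validated with check_range(int,256,262144) rather than a fixed choices list, the new 98304 and 131072 presets only needed to be added to the help/metavar text and the GUI slider list, not to the parser's accepted range. The helper's implementation is not shown in this diff; the following is a plausible sketch of such an argparse range-checking type factory matching the call sites above, an assumption rather than the repository's actual code.

import argparse

# Hypothetical sketch of a check_range(type, lo, hi) factory matching the
# call sites above; the real helper in koboldcpp.py may differ.
def check_range(value_type, lo, hi):
    def checker(arg):
        value = value_type(arg)
        if value < lo or value > hi:
            raise argparse.ArgumentTypeError(f"value must be in [{lo}, {hi}]")
        return value
    return checker

parser = argparse.ArgumentParser()
parser.add_argument("--contextsize", type=check_range(int, 256, 262144), default=2048)
print(parser.parse_args(["--contextsize", "131072"]).contextsize)  # 131072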