mirror of
https://github.com/LostRuins/koboldcpp.git
synced 2025-09-10 09:04:36 +00:00
unclamp glm4 in debug
This commit is contained in:
parent
621cc8f33f
commit
9fdec02914
1 changed file with 8 additions and 2 deletions
|
@@ -1908,8 +1908,14 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
             clamped_max_context_length = 16384;
         }
         if (isGguf && file_format_meta.model_architecture == GGUFArch::ARCH_GLM4 && kcpp_data->n_batch > 16) {
-            printf("GLM-4 is broken on larger batch sizes. Clamping batch size to 16.\n");
-            kcpp_data->n_batch = kcpp_data->n_ubatch = 16;
+            if(debugmode==1)
+            {
+                printf("GLM-4 is broken on larger batch sizes. Clamp ignored in debug.\n");
+            } else {
+                printf("GLM-4 is broken on larger batch sizes. Clamping batch size to 16.\n");
+                kcpp_data->n_batch = kcpp_data->n_ubatch = 16;
+            }
+
         }

         kcpp_data->n_ctx = clamped_max_context_length;
Loading…
Add table
Add a link
Reference in a new issue