glm4 unclamp for all except vulkan

This commit is contained in:
Concedo 2025-04-30 17:19:38 +08:00
parent 9fdec02914
commit 5d382970ec
2 changed files with 8 additions and 6 deletions

View file

@ -1907,16 +1907,18 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
printf("Warning: Only GGUF models can use max context above 16k. Max context lowered to 16k.\n");
clamped_max_context_length = 16384;
}
-if (isGguf && file_format_meta.model_architecture == GGUFArch::ARCH_GLM4 && kcpp_data->n_batch > 16) {
+#if defined(GGML_USE_VULKAN)
+if (isGguf && file_format_meta.model_architecture == GGUFArch::ARCH_GLM4 && kcpp_data->n_ubatch > 16) {
 if(debugmode==1)
 {
-printf("GLM-4 is broken on larger batch sizes. Clamp ignored in debug.\n");
+printf("GLM-4 is broken on larger batch sizes in Vulkan. Clamp ignored in debug.\n");
 } else {
-printf("GLM-4 is broken on larger batch sizes. Clamping batch size to 16.\n");
-kcpp_data->n_batch = kcpp_data->n_ubatch = 16;
+printf("GLM-4 is broken on larger batch sizes in Vulkan. Clamping ubatch size to 16.\n");
+kcpp_data->n_ubatch = 16;
 }
 }
+#endif
kcpp_data->n_ctx = clamped_max_context_length;
max_context_limit_at_load = clamped_max_context_length;

View file

@ -3440,7 +3440,7 @@ Current version indicated by LITEVER below.
"name":"GLM-4",
"user":"<|user|>\\n",
"user_end":"",
-"assistant":"<|assistant|>",
+"assistant":"<|assistant|>\\n",
"assistant_end":"",
"system":"<|system|>\\n",
"system_end":"",