mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-10 00:54:41 +00:00)
temporarily make qwen2vl use clip on cpu for vulkan and macos
parent fc52a38a25
commit b7d3274523
3 changed files with 17 additions and 0 deletions
@@ -2316,6 +2316,13 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
     if(mmproj_filename != "" && file_format==FileFormat::GGUF_GENERIC)
     {
         printf("\nAttempting to apply Multimodal Projector: %s\n", mmproj_filename.c_str());
+        #if defined(GGML_USE_VULKAN) || defined(GGML_USE_METAL)
+        if(file_format_meta.model_architecture == GGUFArch::ARCH_QWEN2VL)
+        {
+            set_clip_uses_gpu(false);
+            printf("Clip will use CPU for this model!\n");
+        }
+        #endif
         clp_ctx = clip_model_load(mmproj_filename.c_str(), /*verbosity=*/ 1);
         if(clp_ctx == nullptr) {
             fprintf(stderr, "%s: error: failed to load mmproj model!\n", __func__);
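The workaround hinges on calling set_clip_uses_gpu(false) before clip_model_load(), so the multimodal projector stays on the CPU only for Qwen2-VL in Vulkan and Metal builds. The standalone sketch below illustrates that flag pattern under assumptions: only set_clip_uses_gpu() appears in the diff; clip_use_gpu and clip_pick_backend() are hypothetical stand-ins for the real backend selection inside koboldcpp's clip code, not its actual implementation.

// clip_gpu_toggle_sketch.cpp -- minimal, self-contained sketch of the
// "set a flag before loading" pattern used by this commit. Everything
// except set_clip_uses_gpu() is illustrative, not koboldcpp's real code.
#include <cstdio>

static bool clip_use_gpu = true;   // assumed default: offload CLIP when a GPU backend exists

// Matches the call made in gpttype_load_model() above.
void set_clip_uses_gpu(bool usegpu)
{
    clip_use_gpu = usegpu;
}

// Hypothetical stand-in for the backend choice inside clip_model_load().
static const char * clip_pick_backend()
{
#if defined(GGML_USE_VULKAN)
    if (clip_use_gpu) return "vulkan";
#elif defined(GGML_USE_METAL)
    if (clip_use_gpu) return "metal";
#endif
    return "cpu";
}

int main()
{
    // Mirror the Qwen2-VL workaround: force CPU before loading the projector.
    set_clip_uses_gpu(false);
    std::printf("CLIP backend: %s\n", clip_pick_backend());
    return 0;
}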