mirror of
https://github.com/LostRuins/koboldcpp.git
synced 2026-05-17 04:09:19 +00:00
llama : add missing call to ggml_backend_load_all() (#22752)
Signed-off-by: Adrien Gallouët <angt@huggingface.co>
This commit is contained in:
parent
2496f9c149
commit
3980e04d5a
1 changed file with 6 additions and 0 deletions
|
|
@ -71,12 +71,18 @@ bool llama_supports_mlock(void) {
|
|||
}
|
||||
|
||||
bool llama_supports_gpu_offload(void) {
|
||||
if (!ggml_backend_reg_count()) {
|
||||
ggml_backend_load_all();
|
||||
}
|
||||
return ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU) != nullptr ||
|
||||
ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU) != nullptr ||
|
||||
llama_supports_rpc();
|
||||
}
|
||||
|
||||
bool llama_supports_rpc(void) {
|
||||
if (!ggml_backend_reg_count()) {
|
||||
ggml_backend_load_all();
|
||||
}
|
||||
return ggml_backend_reg_by_name("RPC") != nullptr;
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue