From 3980e04d5a374da025e1942eb1043ac2f33e6a9a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adrien=20Gallou=C3=ABt?=
Date: Thu, 7 May 2026 07:24:47 +0200
Subject: [PATCH] llama : add missing call to ggml_backend_load_all() (#22752)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Adrien Gallouët
---
 src/llama.cpp | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/llama.cpp b/src/llama.cpp
index b5dd8433c..dfe30ce8f 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -71,12 +71,18 @@ bool llama_supports_mlock(void) {
 }
 
 bool llama_supports_gpu_offload(void) {
+    if (!ggml_backend_reg_count()) {
+        ggml_backend_load_all();
+    }
     return ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU) != nullptr ||
            ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU) != nullptr ||
            llama_supports_rpc();
 }
 
 bool llama_supports_rpc(void) {
+    if (!ggml_backend_reg_count()) {
+        ggml_backend_load_all();
+    }
     return ggml_backend_reg_by_name("RPC") != nullptr;
 }
 
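Note (not part of the patch): a minimal sketch of the calling pattern this change fixes, assuming the patch is applied. llama_supports_gpu_offload(), llama_supports_rpc(), ggml_backend_reg_count(), and ggml_backend_load_all() are the functions touched above; the main() harness itself is hypothetical.

    // Hypothetical harness: before this patch, calling these capability
    // queries without loading any backend first always reported "no",
    // because ggml_backend_reg_count() was still zero.
    #include "llama.h"
    #include <cstdio>

    int main(void) {
        // No explicit ggml_backend_load_all() call here: with the patch,
        // the queries below lazily load all backends on first use.
        std::printf("GPU offload: %s\n", llama_supports_gpu_offload() ? "yes" : "no");
        std::printf("RPC:         %s\n", llama_supports_rpc() ? "yes" : "no");
        return 0;
    }

With the guard in place, callers that probe capabilities before any explicit backend initialization see the same answer as callers that initialize first, since both paths end up with the full backend registry loaded.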