diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp
index b69c26bbc..3907b86e7 100644
--- a/src/llama-model-loader.cpp
+++ b/src/llama-model-loader.cpp
@@ -934,22 +934,22 @@ bool llama_model_loader::load_all_data(
         auto * buft = ggml_backend_buffer_get_type(buf);
         auto * dev = ggml_backend_buft_get_device(buft);
         if (!dev) {
-            LLAMA_LOG_DEBUG("%s: no device found for buffer type %s for async uploads\n", func,
-                ggml_backend_buft_name(buft));
+            // LLAMA_LOG_DEBUG("%s: no device found for buffer type %s for async uploads\n", func,
+            //     ggml_backend_buft_name(buft));
             return nullptr;
         }
         if (buft != ggml_backend_dev_buffer_type(dev)) {
-            LLAMA_LOG_DEBUG("%s: buffer type %s is not the default buffer type for device %s for async uploads\n", func,
-                ggml_backend_buft_name(buft), ggml_backend_dev_name(dev));
+            // LLAMA_LOG_DEBUG("%s: buffer type %s is not the default buffer type for device %s for async uploads\n", func,
+            //     ggml_backend_buft_name(buft), ggml_backend_dev_name(dev));
             return nullptr;
         }
         ggml_backend_dev_props props;
         ggml_backend_dev_get_props(dev, &props);
         if (!props.caps.async || !props.caps.host_buffer || !props.caps.events) {
-            LLAMA_LOG_DEBUG("%s: device %s does not support async, host buffers or events\n", func,
-                ggml_backend_dev_name(dev));
+            // LLAMA_LOG_DEBUG("%s: device %s does not support async, host buffers or events\n", func,
+            //     ggml_backend_dev_name(dev));
             return nullptr;
         }