Merge branch 'upstream' into concedo_experimental

# Conflicts:
#	examples/server/README.md
#	src/llama-model.cpp
This commit is contained in:
Concedo 2025-02-08 22:57:18 +08:00
commit 3fa4843850
7 changed files with 61 additions and 43 deletions

View file

@@ -1280,8 +1280,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
bool use_mmap_buffer = true;
LLAMA_LOG_INFO("%s: loading model tensors, this can take a while...", __func__);
// LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s)\n", __func__, use_mmap_buffer ? "true" : "false");
LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s)\n", __func__, ml.use_mmap ? "true" : "false");
// build a list of buffer types for the CPU and GPU devices
pimpl->cpu_buft_list = make_cpu_buft_list(devices);