Merge commit 'acd38efee3' into concedo_experimental

# Conflicts:
#	.devops/cpu.Dockerfile
#	.devops/vulkan.Dockerfile
#	.github/workflows/build.yml
#	.github/workflows/docker.yml
#	CMakeLists.txt
#	README.md
#	cmake/llama-config.cmake.in
#	examples/simple-cmake-pkg/.gitignore
#	ggml/CMakeLists.txt
#	ggml/src/CMakeLists.txt
#	ggml/src/ggml-hip/CMakeLists.txt
Concedo 2025-01-28 18:16:44 +08:00
commit c5d4e07664
11 changed files with 395 additions and 61 deletions

src/llama.cpp

@@ -9446,6 +9446,7 @@ static struct llama_model * llama_model_load_from_file_impl(
             model->devices.push_back(*dev);
         }
     } else {
+        std::vector<ggml_backend_dev_t> rpc_servers;
         // use all available devices
         for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
             ggml_backend_dev_t dev = ggml_backend_dev_get(i);
@@ -9456,10 +9457,19 @@ static struct llama_model * llama_model_load_from_file_impl(
                     break;
 
                 case GGML_BACKEND_DEVICE_TYPE_GPU:
-                    model->devices.push_back(dev);
+                    ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
+                    if (ggml_backend_reg_name(reg) == std::string("RPC")) {
+                        rpc_servers.push_back(dev);
+                    } else {
+                        model->devices.push_back(dev);
+                    }
                     break;
             }
         }
+        // add RPC servers at the front of the list
+        if (!rpc_servers.empty()) {
+            model->devices.insert(model->devices.begin(), rpc_servers.begin(), rpc_servers.end());
+        }
     }
     // if using single GPU mode, remove all except the main GPU
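The change above makes llama_model_load_from_file_impl treat RPC-backed GPU devices specially when the caller passes no explicit device list: instead of being appended in enumeration order, they are collected into rpc_servers and then spliced onto the front of model->devices, so RPC servers are preferred over local GPUs while the relative order within each group is preserved. The following is a minimal, self-contained C++ sketch of that collect-then-prepend pattern; the Dev struct and the sample device names are hypothetical stand-ins for ggml_backend_dev_t and the real ggml enumeration calls, not part of the actual ggml API.

#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for ggml_backend_dev_t: a device name plus the
// backend registry name that ggml_backend_reg_name() would report.
struct Dev {
    std::string name;
    std::string backend;
};

int main() {
    // Assumed sample enumeration order, as if from ggml_backend_dev_get(i).
    const std::vector<Dev> enumerated = {
        {"CUDA0", "CUDA"},
        {"RPC[192.168.1.2:50052]", "RPC"},
        {"CUDA1", "CUDA"},
    };

    std::vector<Dev> devices;     // plays the role of model->devices
    std::vector<Dev> rpc_servers; // RPC devices are deferred, not dropped

    for (const Dev & dev : enumerated) {
        if (dev.backend == "RPC") {
            rpc_servers.push_back(dev);
        } else {
            devices.push_back(dev);
        }
    }

    // Same splice as the hunk: RPC servers go to the front of the list,
    // keeping the relative order within each group (a stable partition).
    if (!rpc_servers.empty()) {
        devices.insert(devices.begin(), rpc_servers.begin(), rpc_servers.end());
    }

    for (const Dev & dev : devices) {
        std::cout << dev.name << '\n';
    }
    // prints: RPC[192.168.1.2:50052], then CUDA0, then CUDA1
}

The final context line of the hunk hints at why the ordering matters: in single-GPU mode everything except the main GPU is removed next, and that selection presumably operates on this reordered list.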