Mirror of https://github.com/Lizonghang/prima.cpp.git, synced 2025-09-05 22:09:03 +00:00
Add tune_layer_allocation
Signed-off-by: DeEMO <yzzxrx@gmail.com>
parent b44187e3af
commit 26bb86c09b
1 changed file with 59 additions and 5 deletions
@@ -1522,6 +1522,61 @@ static bool assign_layers_to_device(
     return true;
 }
 
+static bool tune_layer_allocation(
+        uint32_t   n_world,
+        uint32_t   my_rank,
+        std::vector<device_info> dev_infos,
+        uint32_t * n_layer_window,
+        uint32_t * n_gpu_layers,
+        struct llama_model * model,
+        const struct llama_context_params cparams,
+        float min_disk_read_speed = 0.1f) {
+    memset(n_layer_window, 0, n_world * sizeof(uint32_t));
+    memset(n_gpu_layers,   0, n_world * sizeof(uint32_t));
+    std::vector<device_info> dev_infos_temp = dev_infos;
+    std::vector<uint32_t> n_layer_windows_temp;
+    std::vector<uint32_t> n_gpu_layers_temp;
+    while (n_world > 0) {
+        std::vector<device_info> dev_infos_ = dev_infos_temp;
+        std::vector<uint32_t> n_layer_windows_(n_world, 0);
+        std::vector<uint32_t> n_gpu_layers_(n_world, 0);
+        if (!assign_layers_to_device(n_world, my_rank, dev_infos_.data(),
+                                     n_layer_windows_.data(), n_gpu_layers_.data(), model, cparams)) {
+            return false;
+        }
+        dev_infos_temp.clear();
+        n_layer_windows_temp.clear();
+        n_gpu_layers_temp.clear();
+        for (uint32_t i = 0; i < n_world; i++) {
+            if (n_layer_windows_[i] > 1 || i == 0) {
+                dev_infos_temp.push_back(dev_infos_[i]);
+                n_layer_windows_temp.push_back(n_layer_windows_[i]);
+                n_gpu_layers_temp.push_back(n_gpu_layers_[i]);
+            }
+        }
+        if (dev_infos_temp.size() == n_world) {
+            // no device was removed
+            break;
+        }
+
+        n_world = dev_infos_temp.size();
+    }
+    uint32_t i = 0, j = 0;
+    while (j < n_world) {
+        if (dev_infos[i].rank == dev_infos_temp[j].rank) {
+            n_layer_window[i] = n_layer_windows_temp[j];
+            n_gpu_layers[i]   = n_gpu_layers_temp[j];
+            j++;
+            i++;
+        } else {
+            n_layer_window[i] = 0;
+            n_gpu_layers[i]   = 0;
+            i++;
+        }
+    }
+    return true;
+}
+
 //
 // Model utils
 //
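What the new function does, in short: it repeatedly calls assign_layers_to_device, drops every device (other than rank 0) whose solved window is a single layer, re-solves on the survivors, and finally scatters the surviving windows back to the original rank order, leaving zeros for pruned devices. Below is a minimal, self-contained sketch of that pruning loop. It is illustrative only: device_info is trimmed to the fields the sketch needs, and fake_assign is a hypothetical stand-in for the HiGHS-based assign_layers_to_device solver.

#include <cstdint>
#include <cstdio>
#include <vector>

// Trimmed stand-in for prima.cpp's device_info; only what this sketch needs.
struct device_info {
    uint32_t rank;
    float    speed;   // hypothetical relative throughput of this device
};

// Hypothetical stand-in for assign_layers_to_device: split n_layer layers
// proportionally to speed (rounding down, remainder to the fastest device).
static bool fake_assign(const std::vector<device_info> & devs,
                        std::vector<uint32_t> & win, uint32_t n_layer = 32) {
    float total = 0.0f;
    for (const auto & d : devs) total += d.speed;
    uint32_t used = 0, fastest = 0;
    for (size_t i = 0; i < devs.size(); i++) {
        win[i] = (uint32_t)(n_layer * devs[i].speed / total);
        used += win[i];
        if (devs[i].speed > devs[fastest].speed) fastest = (uint32_t)i;
    }
    win[fastest] += n_layer - used;  // hand the rounding remainder to the fastest
    return true;
}

int main() {
    // Four devices; ranks 1 and 3 are slow enough to be solved down to 1 layer.
    std::vector<device_info> devs = {{0, 8.0f}, {1, 0.5f}, {2, 6.0f}, {3, 0.5f}};
    uint32_t n_world = (uint32_t)devs.size();

    std::vector<device_info> keep = devs;
    std::vector<uint32_t>    keep_win;

    // Pruning loop: re-solve until no device (other than rank 0) is left
    // holding a single-layer window.
    while (n_world > 0) {
        std::vector<uint32_t> win(n_world, 0);
        if (!fake_assign(keep, win)) return 1;

        std::vector<device_info> next;
        std::vector<uint32_t>    next_win;
        for (uint32_t i = 0; i < n_world; i++) {
            if (win[i] > 1 || i == 0) {   // rank 0 is always kept
                next.push_back(keep[i]);
                next_win.push_back(win[i]);
            }
        }
        if (next.size() == n_world) { keep_win = win; break; }  // nothing pruned
        keep     = next;
        keep_win = next_win;
        n_world  = (uint32_t)keep.size();
    }

    // Scatter results back to the original rank order; pruned devices get 0.
    std::vector<uint32_t> final_win(devs.size(), 0);
    for (size_t i = 0, j = 0; i < devs.size(); i++) {
        if (j < keep.size() && devs[i].rank == keep[j].rank) {
            final_win[i] = keep_win[j++];
        }
    }
    for (size_t i = 0; i < final_win.size(); i++) {
        printf("rank %zu -> %u layers\n", i, final_win[i]);
    }
    return 0;
}

With these made-up speeds the sketch prints 19, 0, 13, 0: the two slow devices are pruned and their layers are re-solved onto the fast ones, mirroring how tune_layer_allocation zero-fills n_layer_window for removed ranks.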
@@ -1613,15 +1668,14 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
     // synchronize device profile to the master node
     if (my_rank == 0) {
         if (auto_schedule) {
-            struct device_info * dev_info_set = nullptr;
-            dev_info_set = (struct device_info *)malloc(n_world * sizeof(struct device_info));
+            std::vector<device_info> dev_info_set(n_world);
             dev_info_set[0] = dev_info;
 
-            llama_gather_device_info(lctx, dev_info_set);
-            device_print_props(dev_info_set, n_world, model, cparams);
+            llama_gather_device_info(lctx, dev_info_set.data());
+            device_print_props(dev_info_set.data(), n_world, model, cparams);
 
             // automatically determine n_layer_window and n_gpu_layers
-            if (!assign_layers_to_device(n_world, my_rank, dev_info_set, n_layer_window, n_gpu_layers, model, cparams)) {
+            if (!tune_layer_allocation(n_world, my_rank, dev_info_set, n_layer_window, n_gpu_layers, model, cparams)) {
                 LOG_ERR("%s: Invalid allocation by HiGHS solver\n", __func__);
                 llama_free(lctx);
                 llama_free_model(model);
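Besides routing the call through the new tune_layer_allocation, this hunk is a memory-safety cleanup: the profile buffer used to be malloc'd and, at least in the lines shown here, was never freed on the error path below, so a failed init would leak it. Switching to std::vector makes cleanup automatic on any exit. A minimal sketch of the pattern, where gather is a hypothetical stand-in for llama_gather_device_info:

#include <vector>

struct device_info { int rank; };

// Hypothetical stand-in for llama_gather_device_info: fills a raw buffer.
static void gather(device_info * out, int n) {
    for (int i = 0; i < n; i++) { out[i].rank = i; }
}

static bool init_profiles(int n_world) {
    // Before: device_info * set = (device_info *) malloc(n_world * sizeof(device_info));
    // Any early return below would leak that buffer.
    std::vector<device_info> set(n_world);  // RAII-managed storage
    gather(set.data(), n_world);            // .data() wherever a raw pointer is expected
    if (set[0].rank != 0) {
        return false;                       // early exit: the vector still frees itself
    }
    return true;
}

int main() { return init_profiles(4) ? 0 : 1; }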