reformat code

Li, Zonghang 2025-06-03 23:53:24 +04:00
parent b6fdbd541b
commit 6439090920
5 changed files with 137 additions and 130 deletions

View file

@@ -280,7 +280,7 @@ ifeq ($(USE_HIGHS),1)
     HIGHS_LDFLAGS = -L/usr/local/lib -lhighs
     ifeq ($(UNAME_S),Darwin)
         HIGHS_CPPFLAGS += -isystem /opt/homebrew/include/highs
-        HIGHS_LDFLAGS += -L/opt/homebrew/lib -lhighs
+        HIGHS_LDFLAGS += -L/opt/homebrew/lib
     endif
     MK_CPPFLAGS += $(HIGHS_CPPFLAGS) -DUSE_HIGHS
     MK_LDFLAGS += $(HIGHS_LDFLAGS)

View file

@@ -848,7 +848,6 @@ static std::string vec_to_str(const std::vector<T> & vec) {
 static bool assign_layers_to_device(
     uint32_t n_world,
-    uint32_t my_rank,
     const device_info * dev_info_set,
     uint32_t * n_layer_window,
     uint32_t * n_gpu_layers,
@@ -857,15 +856,8 @@ static bool assign_layers_to_device(
     float min_disk_read_speed = 0.1f) { // minimum disk I/O speed: 100 MB/s
     GGML_ASSERT(dev_info_set != nullptr);
     GGML_ASSERT(n_layer_window != nullptr);
-    GGML_ASSERT(my_rank == 0);
 
-    // if only 1 device, it is assigned all layers
     const uint32_t n_layer = llama_model_n_layers(model);
-    if (n_world == 1) {
-        n_layer_window[0] = n_layer;
-        return true;
-    }
-
     std::vector<int> w(n_world, 0);
     std::vector<int> n(n_world, 0);
     std::vector<float> mem_budget(n_world, 0.0f);
@@ -1102,7 +1094,6 @@ static bool assign_layers_to_device(
     };
     (void)print_matrix;
 
-    double final_objective = 1.0e30;
     std::vector<double> final_solution;
     int final_k = -1;
@@ -1442,7 +1433,6 @@ static bool assign_layers_to_device(
         // update the global best solution
         final_k = best_k;
-        final_objective = best_objective;
         final_solution = best_solution;
 
         if (solution_unchanged) break;
@@ -1461,8 +1451,7 @@ static bool assign_layers_to_device(
         LOG_INF(" - N Layer Window : %d\n", w[m]);
         LOG_INF(" - N GPU Layers : %d\n", n[m]);
     }
-    // LOG_INF("\nEstimated Latency: %.3f ms\n", final_objective);
-    // LOG_INF("------------------------------------------");
+    LOG_INF("\n");
 
     // copy value from w and n to n_layer_window and n_gpu_layers, respectively
     std::copy(w.begin(), w.end(), n_layer_window);
@@ -1522,58 +1511,67 @@ static bool assign_layers_to_device(
     return true;
 }
 
-static bool tune_layer_allocation(
+static bool assign_layers_and_select_devices(
     uint32_t n_world,
-    uint32_t my_rank,
     std::vector<device_info> dev_infos,
     uint32_t * n_layer_window,
     uint32_t * n_gpu_layers,
     struct llama_model * model,
-    const struct llama_context_params cparams,
-    float min_disk_read_speed = 0.1f) {
+    const struct llama_context_params cparams) {
     memset(n_layer_window, 0, n_world * sizeof(uint32_t));
     memset(n_gpu_layers, 0, n_world * sizeof(uint32_t));
 
     std::vector<device_info> dev_infos_temp = dev_infos;
-    std::vector<uint32_t> n_layer_windows_temp;
-    std::vector<uint32_t> n_gpu_layers_temp;
+    std::vector<uint32_t> n_layer_windows_temp, n_gpu_layers_temp;
 
     while (n_world > 0) {
         std::vector<device_info> dev_infos_ = dev_infos_temp;
-        std::vector<uint32_t> n_layer_windows_(n_world, 0);
-        std::vector<uint32_t> n_gpu_layers_(n_world, 0);
+        std::vector<uint32_t> n_layer_windows_(n_world, 0), n_gpu_layers_(n_world, 0);
 
-        if (!assign_layers_to_device(n_world, my_rank, dev_infos_.data(),
+        if (!assign_layers_to_device(n_world, dev_infos_.data(),
             n_layer_windows_.data(), n_gpu_layers_.data(), model, cparams)) {
             return false;
         }
 
         dev_infos_temp.clear();
         n_layer_windows_temp.clear();
         n_gpu_layers_temp.clear();
 
         for (uint32_t i = 0; i < n_world; i++) {
             if (n_layer_windows_[i] > 1 || i == 0 ) {
                 dev_infos_temp.push_back(dev_infos_[i]);
                 n_layer_windows_temp.push_back(n_layer_windows_[i]);
                 n_gpu_layers_temp.push_back(n_gpu_layers_[i]);
+            } else {
+                // remove this device
+                LOG_INF("Remove device %s (rank %d) with only %d layer assigned.\n",
+                        dev_infos_[i].device_name, dev_infos_[i].rank, n_layer_windows_[i]);
             }
         }
 
         if(dev_infos_temp.size() == n_world) {
             // no device be removed
             break;
         }
 
         n_world = dev_infos_temp.size();
+        LOG_INF("Reassign layers to the remaining %d device(s).\n\n", n_world);
     }
 
     uint32_t i = 0 , j = 0;
     while (j < n_world) {
         if (dev_infos[i].rank == dev_infos_temp[j].rank) {
             n_layer_window[i] = n_layer_windows_temp[j];
             n_gpu_layers[i] = n_gpu_layers_temp[j];
             j++;
-            i++;
         } else {
             n_layer_window[i] = 0;
             n_gpu_layers[i] = 0;
-            i++;
         }
+        i++;
     }
 
     return true;
 }
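Note: the function above repeatedly calls the HiGHS-backed solver, drops every device that was assigned at most one layer (the head device at index 0 is always kept), and re-solves for the survivors until no device is removed. The standalone sketch below shows that prune-and-reassign pattern with the solver replaced by a naive equal-split stub; Device, solve_allocation, and assign_and_prune are hypothetical names for illustration, not prima.cpp API.

#include <cstdint>
#include <cstdio>
#include <vector>

struct Device { uint32_t rank; };

// Stand-in for assign_layers_to_device(): naive equal split, no GPU offload.
static bool solve_allocation(const std::vector<Device> & devs, uint32_t n_layer,
                             std::vector<uint32_t> & windows) {
    windows.assign(devs.size(), n_layer / (uint32_t)devs.size());
    return true;
}

// Re-solve until every surviving device keeps more than one layer;
// index 0 (the head device) is always kept, as in the code above.
static bool assign_and_prune(std::vector<Device> devs, uint32_t n_layer,
                             std::vector<uint32_t> & windows) {
    while (!devs.empty()) {
        if (!solve_allocation(devs, n_layer, windows)) {
            return false;
        }
        std::vector<Device>   kept;
        std::vector<uint32_t> kept_w;
        for (size_t i = 0; i < devs.size(); i++) {
            if (windows[i] > 1 || i == 0) {
                kept.push_back(devs[i]);
                kept_w.push_back(windows[i]);
            } else {
                printf("Remove device rank %u with only %u layer assigned.\n",
                       devs[i].rank, windows[i]);
            }
        }
        if (kept.size() == devs.size()) { // no device was removed: done
            windows = kept_w;
            return true;
        }
        devs = kept; // reassign layers to the remaining devices
    }
    return false;
}

int main() {
    std::vector<Device> devs = {{0}, {1}, {2}, {3}};
    std::vector<uint32_t> windows;
    if (assign_and_prune(devs, /*n_layer=*/5, windows)) {
        for (size_t i = 0; i < windows.size(); i++) {
            printf("device %zu -> %u layers\n", i, windows[i]);
        }
    }
}

With 5 layers over 4 devices the stub gives each device 1 layer, so ranks 1 to 3 are pruned and the second pass assigns all 5 layers to the head device, mirroring the shrinking n_world loop above.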
@@ -1698,16 +1696,14 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
             llama_gather_device_info(lctx, dev_info_set.data());
             device_print_props(dev_info_set.data(), n_world, model, cparams);
 
-            // automatically determine n_layer_window and n_gpu_layers
-            if (!tune_layer_allocation(n_world, my_rank, dev_info_set, n_layer_window, n_gpu_layers, model, cparams)) {
+            // assign layers to devices and remove weak devices
+            if (!assign_layers_and_select_devices(n_world, dev_info_set, n_layer_window, n_gpu_layers, model, cparams)) {
                 LOG_ERR("%s: Invalid allocation by HiGHS solver\n", __func__);
                 llama_free(lctx);
                 llama_free_model(model);
                 return iparams;
             }
 
             llama_bcast_layer_setup(lctx, n_layer_window, n_gpu_layers);
-            //rebuild topo
             llama_rebuild_topo(lctx, n_layer_window, dev_info_set.data());
         } else {
             // use the user-defined n_layer_window
@@ -1718,24 +1714,24 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
         if (auto_schedule){
             llama_send_device_info(lctx, &dev_info);
             llama_recv_layer_setup(lctx, n_layer_window, n_gpu_layers);
-            // rebuild topo
             llama_rebuild_topo (lctx, n_layer_window, nullptr);
         } else {
             llama_recv_layer_setup(lctx, n_layer_window, n_gpu_layers);
         }
     }
 
+    // if this is a weak device, then exit
     if (n_layer_window[my_rank] <= 0) {
-        LOG_INF("%s: info: rank %d has no layers to run, skipping\n", __func__, my_rank);
+        LOG_INF("No layer is assigned to me, exit.\n");
         llama_free(lctx);
         llama_free_model(model);
         exit(0);
     }
 
-    //update rank and n_world for consistency
-    uint32_t update_rank = 0;
-    uint32_t update_n_world = 1;
-    std::vector<uint32_t> n_layer_window_temp = {n_layer_window[0]};
-    std::vector<uint32_t> n_gpu_layers_temp = {n_gpu_layers[0]};
+    // update my rank and n_world
+    uint32_t update_rank = 0, update_n_world = 1;
+    std::vector<uint32_t> n_layer_window_temp = {n_layer_window[0]}, n_gpu_layers_temp = {n_gpu_layers[0]};
     for (uint32_t i = 1; i < n_world; i++) {
         if (n_layer_window[i] <= 0) {
             continue;
@@ -1747,22 +1743,29 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
         n_layer_window_temp.push_back(n_layer_window[i]);
         n_gpu_layers_temp.push_back(n_gpu_layers[i]);
     }
 
     memset(n_layer_window, 0, n_world * sizeof(uint32_t));
     memset(n_gpu_layers, 0, n_world * sizeof(uint32_t));
 
     for (uint32_t i = 0; i < update_n_world; i++) {
         n_layer_window[i] = n_layer_window_temp[i];
         n_gpu_layers[i] = n_gpu_layers_temp[i];
     }
 
-    llama_update_context_with_rankworld(lctx, update_rank, update_n_world);
-
+    // update my rank
     cparams.rank = update_rank;
-    cparams.n_world = update_n_world;
     mparams.rank = update_rank;
-    mparams.n_world = update_n_world;
     params.rank = update_rank;
-    params.n_world = update_n_world;
     my_rank = update_rank;
 
+    // update n_world
+    cparams.n_world = update_n_world;
+    mparams.n_world = update_n_world;
+    params.n_world = update_n_world;
     n_world = update_n_world;
 
+    llama_update_context_with_rankworld(lctx, update_rank, update_n_world);
 
     // update n_layer_window and n_gpu_layers
     std::copy(std::begin(n_layer_window), std::end(n_layer_window), params.n_layer_window);
     std::copy(std::begin(n_layer_window), std::end(n_layer_window), cparams.n_layer_window);
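Note: the compaction above drops zero-layer entries and renumbers ranks so that a device's new rank is simply its position among the surviving devices. A minimal sketch of that renumbering, using a hypothetical compact_ranks helper (not the prima.cpp code itself):

#include <cstdint>
#include <cstdio>
#include <vector>

static void compact_ranks(std::vector<uint32_t> & n_layer_window,
                          uint32_t my_rank,
                          uint32_t & new_rank, uint32_t & new_n_world) {
    new_rank = 0;
    new_n_world = 0;
    std::vector<uint32_t> kept;
    for (uint32_t i = 0; i < (uint32_t)n_layer_window.size(); i++) {
        if (n_layer_window[i] == 0) continue;     // device i was removed
        if (i == my_rank) new_rank = new_n_world; // my position among survivors
        kept.push_back(n_layer_window[i]);
        new_n_world++;
    }
    n_layer_window = kept; // layer windows packed to the front, as above
}

int main() {
    std::vector<uint32_t> w = {14, 0, 10, 8};     // rank 1 got no layers
    uint32_t r, n;
    compact_ranks(w, /*my_rank=*/2, r, n);
    printf("new rank %u of %u\n", r, n);          // prints: new rank 1 of 3
}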

View file

@@ -200,7 +200,8 @@ int main(int argc, char ** argv) {
     // load the model and apply lora adapter, if any
     LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__);
     llama_init_result llama_init = llama_init_from_gpt_params(params);
-    // update
+
+    // update my rank and world size if any devices removed
     my_rank = params.rank;
     n_world = params.n_world;

View file

@@ -463,7 +463,8 @@ extern "C" {
             struct llama_model * model,
             struct llama_model_params params);
 
-    LLAMA_API void llama_update_context_with_rankworld(struct llama_context * ctx,
+    LLAMA_API void llama_update_context_with_rankworld(
+            struct llama_context * ctx,
             uint32_t rank,
             uint32_t n_world);

View file

@@ -7470,6 +7470,8 @@ static void llm_load_qwen2_tensors(
     const uint32_t * n_layer_window,
     bool * use_mmap_buffer,
     bool set_needed) {
+    (void)use_mmap_buffer; // unused in this function
+
     const auto tn = LLM_TN(model.arch);
 
     ggml_context * ctx_input = nullptr;
@@ -7488,7 +7490,6 @@ static void llm_load_qwen2_tensors(
     const llama_hparams hparams = model.hparams;
     const int64_t n_embd = hparams.n_embd;
     const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
-    // const int64_t n_embd_gqa = n_embd_v_gqa;
     const int64_t n_ff = hparams.n_ff();
     const int64_t n_vocab = hparams.n_vocab;
     const int64_t n_layer = hparams.n_layer;
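Note: (void)use_mmap_buffer; in the hunk above is the standard C++ idiom for marking a parameter as intentionally unused, so -Wunused-parameter (and -Werror builds) stay quiet without changing the function's signature. A minimal illustration, with a hypothetical load_tensors:

#include <cstdio>

static void load_tensors(bool * use_mmap_buffer, bool set_needed) {
    (void)use_mmap_buffer; // intentionally unused in this code path
    if (set_needed) {
        printf("marking tensors as needed\n");
    }
}

int main() {
    bool mmap_flag = true;
    load_tensors(&mmap_flag, true);
}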
@@ -20525,14 +20526,12 @@ int llama_bcast_layer_setup(struct llama_context * ctx, uint32_t * n_layer_window, uint32_t * n_gpu_layers) {
     return 0;
 }
 
-LLAMA_API int llama_rebuild_topo(llama_context *ctx,
-                                 uint32_t *n_layer_window,
-                                 device_info *dev_info_set) {
+int llama_rebuild_topo(llama_context * ctx, uint32_t * n_layer_window, device_info * dev_info_set) {
     uint32_t n_world = ctx->cparams.n_world;
     uint32_t my_rank = ctx->cparams.rank;
     device_info * dev_info_ptr = nullptr;
 
     if (dev_info_set == nullptr) {
-        // for rank!=0, recv all devices info
         std::vector<zmq::message_t> msgs;
         if (!zmq::recv_multipart(*ctx->recv_socket, std::back_inserter(msgs))) {
             return -1;
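Note: when dev_info_set is null (every rank except 0), llama_rebuild_topo receives the serialized device table as one multipart ZMQ message, one frame per device. A self-contained sketch of that receive path using cppzmq's zmq_addon.hpp helpers over an in-process socket pair; the frame payloads are placeholders standing in for dev_infos_to_messages():

#include <zmq.hpp>
#include <zmq_addon.hpp>
#include <array>
#include <cstdio>
#include <iterator>
#include <vector>

int main() {
    zmq::context_t zctx(1);
    zmq::socket_t tx(zctx, zmq::socket_type::pair);
    zmq::socket_t rx(zctx, zmq::socket_type::pair);
    rx.bind("inproc://topo");
    tx.connect("inproc://topo");

    // one frame per device, standing in for dev_infos_to_messages()
    std::array<zmq::const_buffer, 2> frames = {
        zmq::str_buffer("dev_info[0]"), zmq::str_buffer("dev_info[1]")
    };
    zmq::send_multipart(tx, frames);

    std::vector<zmq::message_t> msgs;
    if (!zmq::recv_multipart(rx, std::back_inserter(msgs))) {
        return -1; // same failure handling as the code above
    }
    printf("received %zu frames\n", msgs.size());
    return 0;
}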
@@ -20564,12 +20563,12 @@ LLAMA_API int llama_rebuild_topo(llama_context *ctx,
         }
     }
 
-    // check myself's layer
     zmq::socket_t * socket_to_close = nullptr;
+
     if (n_layer_window[my_rank] > 0) {
         // reconstruct socket to the next valid rank
         std::string next_ip;
         auto current_rank = my_rank;
 
         while (next_rank != my_rank) {
             if (n_layer_window[next_rank] > 0) {
                 next_ip = dev_info_ptr[current_rank].next_ip;
@@ -20578,6 +20577,7 @@ LLAMA_API int llama_rebuild_topo(llama_context *ctx,
             next_rank = (next_rank + 1) % n_world;
             current_rank = (current_rank + 1) % n_world;
         }
+
         if (!next_ip.empty()) {
             if ((my_rank + 1) % n_world != next_rank) {
                 socket_to_close = ctx->send_socket;
@@ -20587,6 +20587,7 @@ LLAMA_API int llama_rebuild_topo(llama_context *ctx,
                 ctx->next_node_ip = next_ip;
                 ctx->cparams.original_next_rank = next_rank;
             }
+
             if (next_rank != 0) {
                 try {
                     auto msgs = dev_infos_to_messages(dev_info_ptr, n_world);
@@ -20604,13 +20605,16 @@ LLAMA_API int llama_rebuild_topo(llama_context *ctx,
             ctx->next_node_ip = "";
         }
     }
 
     if (!dev_info_set) {
         delete[] dev_info_ptr;
     }
 
     if(socket_to_close != nullptr){
         socket_to_close->close();
         delete socket_to_close;
     }
 
     return 0;
 }
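Note: the core of the rebuild above is a ring walk: starting from my_rank + 1, skip every rank whose layer window is zero and connect to the first survivor. A standalone toy of that successor search, without the ZMQ rewiring; next_valid_rank is a hypothetical name:

#include <cstdint>
#include <cstdio>
#include <vector>

// Returns the next rank (clockwise) with at least one layer, or my_rank
// itself if every other device has been removed.
static uint32_t next_valid_rank(const std::vector<uint32_t> & n_layer_window,
                                uint32_t my_rank) {
    const uint32_t n_world = (uint32_t)n_layer_window.size();
    uint32_t next_rank = (my_rank + 1) % n_world;
    while (next_rank != my_rank && n_layer_window[next_rank] == 0) {
        next_rank = (next_rank + 1) % n_world;
    }
    return next_rank;
}

int main() {
    std::vector<uint32_t> w = {14, 0, 0, 18};      // ranks 1 and 2 were removed
    printf("%u\n", next_valid_rank(w, 0));         // rank 0 now forwards to rank 3
    printf("%u\n", next_valid_rank(w, 3));         // rank 3 wraps back to rank 0
}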
@@ -20675,9 +20679,7 @@ void llama_free_sockets(struct llama_context * ctx, char ** msg) {
     }
 }
 
-void llama_update_context_with_rankworld(struct llama_context * ctx,
-                                         uint32_t rank,
-                                         uint32_t n_world) {
+void llama_update_context_with_rankworld(struct llama_context * ctx, uint32_t rank, uint32_t n_world) {
     if (ctx) {
         ctx->cparams.rank = rank;
         ctx->cparams.n_world = n_world;