Mirror of https://github.com/Lizonghang/prima.cpp.git (synced 2025-09-10 06:15:26 +00:00)

commit c97ea10617 (parent ba5117581e): add mmap prefetch and unloading
7 changed files with 161 additions and 11 deletions
@@ -696,6 +696,13 @@ gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex,
             params.next_node_ip = value;
         }
     ).set_env("LLAMA_ARG_NEXT_NODE_IP"));
+    add_opt(llama_arg(
+        {"--unload", "--unload-weight"},
+        format("whether to unload layer weights after use (default: %s)", params.unload ? "true" : "false"),
+        [](gpt_params & params) {
+            params.unload = true;
+        }
+    ).set_env("LLAMA_ARG_UNLOAD"));
     add_opt(llama_arg(
         {"-n", "--predict", "--n-predict"}, "N",
         format("number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)", params.n_predict),

@@ -1039,6 +1039,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     cparams.n_world = params.n_world;
     cparams.rank = params.rank;
     cparams.n_layer_window = params.n_layer_window;
+    cparams.unload = params.unload;

     if (cparams.master_ip != nullptr) {
         delete[] cparams.master_ip;

@@ -147,6 +147,7 @@ struct gpt_params {
     int32_t n_layer_window = 32; // number of layers to process in each compute
     std::string master_ip = "localhost"; // ip address of the master node
     std::string next_node_ip = "localhost"; // ip address of my next node
+    bool unload = false; // unload layer weights after use or not
     int32_t n_predict = -1; // new tokens to predict
     int32_t n_ctx = 0; // context size
     int32_t n_batch = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS)

@@ -2086,8 +2086,10 @@ extern "C" {

     GGML_API int ggml_graph_size (struct ggml_cgraph * cgraph);
     GGML_API struct ggml_tensor * ggml_graph_node (struct ggml_cgraph * cgraph, int i); // if i < 0, returns nodes[n_nodes + i]
+    GGML_API struct ggml_tensor * ggml_graph_leaf (struct ggml_cgraph * cgraph, int i); // if i < 0, returns leafs[n_leafs + i]
     GGML_API struct ggml_tensor ** ggml_graph_nodes (struct ggml_cgraph * cgraph);
     GGML_API int ggml_graph_n_nodes(struct ggml_cgraph * cgraph);
+    GGML_API int ggml_graph_n_leafs(struct ggml_cgraph * cgraph);

     GGML_API void ggml_graph_add_node(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);

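These two accessors, ggml_graph_leaf and ggml_graph_n_leafs, are what the llama.cpp changes below use to walk a graph's weight tensors without reaching into ggml_cgraph internals. A minimal usage sketch, assuming a graph has already been built (the helper name is illustrative, not part of the commit):

    // Illustrative only: count the leaf tensors of a built graph that look like
    // model weights, using just the public accessors declared above.
    #include <string.h>
    #include "ggml.h"

    static int count_weight_leafs(struct ggml_cgraph * cgraph) {
        int n_weights = 0;
        for (int i = 0; i < ggml_graph_n_leafs(cgraph); i++) {
            struct ggml_tensor * leaf = ggml_graph_leaf(cgraph, i);
            if (leaf->data != NULL && strstr(leaf->name, "weight") != NULL) {
                n_weights++;
            }
        }
        return n_weights;
    }
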
@@ -19104,6 +19104,16 @@ struct ggml_tensor * ggml_graph_node(struct ggml_cgraph * cgraph, int i) {
     return cgraph->nodes[i];
 }

+struct ggml_tensor * ggml_graph_leaf(struct ggml_cgraph * cgraph, int i) {
+    if (i < 0) {
+        GGML_ASSERT(cgraph->n_leafs + i >= 0);
+        return cgraph->leafs[cgraph->n_leafs + i];
+    }
+
+    GGML_ASSERT(i < cgraph->n_leafs);
+    return cgraph->leafs[i];
+}
+
 struct ggml_tensor ** ggml_graph_nodes(struct ggml_cgraph * cgraph) {
     return cgraph->nodes;
 }

@@ -19112,6 +19122,10 @@ int ggml_graph_n_nodes(struct ggml_cgraph * cgraph) {
     return cgraph->n_nodes;
 }

+int ggml_graph_n_leafs(struct ggml_cgraph * cgraph) {
+    return cgraph->n_leafs;
+}
+
 void ggml_graph_add_node(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) {
     GGML_ASSERT(cgraph->size > cgraph->n_nodes);
     cgraph->nodes[cgraph->n_nodes] = tensor;

@@ -318,6 +318,7 @@ extern "C" {
         uint32_t n_world;        // world size
         uint32_t rank;           // my rank
         uint32_t n_layer_window; // number of layers to process in each compute
+        bool unload;             // whether to unload layer weights after use
         char * master_ip;        // ip address of the master node
         char * next_node_ip;     // ip address of the next node
         uint32_t n_ctx;          // text context, 0 = from model

src/llama.cpp (132 changed lines)

@@ -1968,6 +1968,10 @@ struct llama_mmap {
         if (*last <= *first) {
             *last = *first;
         }
+
+        GGML_ASSERT(*first % page_size == 0);
+        GGML_ASSERT(*last % page_size == 0);
+        GGML_ASSERT(*last >= *first);
     }

     // partially unmap the file in the range [first, last)

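The new asserts only hold if align_range leaves both ends of the range on page boundaries. The helper's body is not shown in this hunk; a sketch of the behavior the asserts imply (an assumption, not the actual implementation), with *first rounded up to the next page and *last rounded down to the previous one:

    // Assumed behavior of llama_mmap::align_range, inferred from the asserts above:
    // shrink [*first, *last) inward to whole pages; collapse to empty if nothing fits.
    static void align_range_sketch(size_t * first, size_t * last, size_t page_size) {
        size_t offset_in_page = *first & (page_size - 1);   // assumes power-of-two page size
        *first += offset_in_page ? page_size - offset_in_page : 0;
        *last  &= ~(page_size - 1);
        if (*last <= *first) {
            *last = *first;
        }
    }
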
@@ -2562,6 +2566,7 @@ struct llama_cparams {
     uint32_t n_world;
     uint32_t rank;
     uint32_t n_layer_window;
+    bool unload;
     uint32_t n_ctx; // context size used during inference
     uint32_t n_batch;
     uint32_t n_ubatch;

@@ -8935,7 +8940,7 @@ static bool llm_load_tensors(
         }
     }

-    ml.init_mappings(true, use_mlock ? &model.mlock_mmaps : nullptr);
+    ml.init_mappings(false, use_mlock ? &model.mlock_mmaps : nullptr); // disable prefetch here
     model.mappings.reserve(ml.mappings.size());

     // create the backend buffers

@@ -17286,14 +17291,14 @@ static void llama_send_tensors(zmq::socket_t & socket, struct input_tensors * te
 }

 static int llama_recv_meta(zmq::socket_t & socket, struct sync_meta * meta) {
-    socket.setsockopt(ZMQ_RCVTIMEO, 1000);
+    socket.set(zmq::sockopt::rcvtimeo, 1000);

     std::vector<zmq::message_t> recv_msgs;
     if (!zmq::recv_multipart(socket, std::back_inserter(recv_msgs))) {
         return -1;
     }

-    socket.setsockopt(ZMQ_RCVTIMEO, -1);
+    socket.set(zmq::sockopt::rcvtimeo, -1);

     for (size_t i = 0; i < recv_msgs.size(); i += 2) {
         std::string key = recv_msgs[i].to_string();

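The two replaced calls move from cppzmq's deprecated setsockopt to the typed zmq::sockopt API (available in cppzmq 4.7 and later); the behavior is unchanged, a temporary 1000 ms receive timeout that is reset to blocking afterwards. A standalone sketch of that pattern:

    #include <zmq.hpp>

    // Minimal sketch of the typed socket-option API used above (cppzmq >= 4.7):
    // receive with a 1 s timeout, then restore blocking receives.
    static bool try_recv_one(zmq::socket_t & socket, zmq::message_t & msg) {
        socket.set(zmq::sockopt::rcvtimeo, 1000);   // fail recv after 1000 ms
        bool ok = socket.recv(msg, zmq::recv_flags::none).has_value();
        socket.set(zmq::sockopt::rcvtimeo, -1);     // back to blocking
        return ok;
    }
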
@@ -17335,6 +17340,106 @@ static void llama_recv_tensors(zmq::socket_t & socket, input_tensors * tensors)
     }
 }

+static bool is_tensor_loaded(struct ggml_tensor * tensor) {
+    void * addr = (void *)tensor->data;
+    size_t size = ggml_nbytes(tensor);
+
+#ifdef _WIN32
+    MEMORY_BASIC_INFORMATION mbi;
+    size_t bytes_checked = 0;
+    while (bytes_checked < size) {
+        if (VirtualQuery((char *)addr + bytes_checked, &mbi, sizeof(mbi)) == 0) {
+            LLAMA_LOG_ERROR("VirtualQuery failed\n");
+            return false;
+        }
+        if (mbi.State != MEM_COMMIT) {
+            return false; // memory not loaded
+        }
+        bytes_checked += mbi.RegionSize;
+    }
+    return true;
+#else
+    size_t first = (size_t)addr;
+    size_t last = first + size;
+    long page_size = sysconf(_SC_PAGESIZE);
+
+    // align addr
+    llama_mmap::align_range(&first, &last, page_size);
+    size_t len = std::max(last - first, static_cast<size_t>(page_size));
+
+    // calculate the number of pages to check
+    size_t page_count = (len + page_size - 1) / page_size;
+
+#ifdef __APPLE__
+    char * mincore_res = new char[page_count];
+#else
+    unsigned char * mincore_res = new unsigned char[page_count]; // use 'unsigned char' for Linux
+#endif
+
+    // call mincore to check if pages are resident in memory
+    if (mincore((void *)first, len, mincore_res) == 0) {
+        for (size_t i = 0; i < page_count; ++i) {
+            if (!(mincore_res[i] & 1)) {
+                delete[] mincore_res;
+                return false; // page not loaded
+            }
+        }
+        delete[] mincore_res;
+        return true; // page loaded
+    } else {
+        LLAMA_LOG_ERROR("mincore failed\n");
+        delete[] mincore_res;
+        return false;
+    }
+#endif
+}
+
+static float is_graph_loaded(struct ggml_cgraph * cgraph) {
+    uint32_t n_loaded = 0;
+    uint32_t n_total = 0;
+    for (int i = 0; i < ggml_graph_n_leafs(cgraph); i++) {
+        struct ggml_tensor * cur = ggml_graph_leaf(cgraph, i);
+        if (strstr(cur->name, "weight") == nullptr || cur->data == nullptr) {
+            continue;
+        }
+        if (is_tensor_loaded(cur)) n_loaded++;
+        n_total++;
+    }
+    return float(n_loaded) / float(n_total) * 100.0f;
+}
+
+static void manage_graph_tensors(struct ggml_cgraph * cgraph, int advice, bool force = false) {
+    for (int i = 0; i < ggml_graph_n_leafs(cgraph); i++) {
+        struct ggml_tensor * cur = ggml_graph_leaf(cgraph, i);
+
+        if (strstr(cur->name, "weight") == nullptr || cur->data == nullptr) {
+            continue;
+        }
+
+        void * addr = (void *)cur->data;
+        size_t size = ggml_nbytes(cur);
+        size_t first = (size_t)addr;
+        size_t last = first + size;
+        long page_size = sysconf(_SC_PAGESIZE);
+
+        // align addr
+        llama_mmap::align_range(&first, &last, page_size);
+        size_t len = std::max(last - first, static_cast<size_t>(page_size));
+
+        // hint to load memory
+        posix_madvise((void *)first, len, advice);
+
+        // if advice is POSIX_MADV_WILLNEED, force to prefetch data
+        if (force && advice == POSIX_MADV_WILLNEED) {
+            volatile char * ptr = (volatile char *)first;
+            for (size_t off = 0; off < len; off += page_size) {
+                volatile char data = ptr[off];
+                (void)data;
+            }
+        }
+    }
+}
+
 // decode a batch of tokens by evaluating the transformer
 //
 // - lctx: llama context

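The same check-then-hint pattern implemented by the three new helpers can be exercised outside llama.cpp on any mmap'd file. A self-contained POSIX sketch (the file path is illustrative; Linux's unsigned char mincore vector is assumed, as in the #else branch above):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    // Standalone sketch of the mincore + posix_madvise pattern used above,
    // applied to a whole mmap'd file instead of individual graph tensors.
    int main() {
        const char * path = "model.bin";                 // illustrative path
        int fd = open(path, O_RDONLY);
        if (fd < 0) { perror("open"); return 1; }

        struct stat st;
        if (fstat(fd, &st) != 0 || st.st_size == 0) { close(fd); return 1; }
        size_t len = (size_t) st.st_size;

        void * addr = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
        if (addr == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

        long page_size = sysconf(_SC_PAGESIZE);
        size_t pages = (len + page_size - 1) / page_size;

        // Hint the kernel to prefetch, then touch one byte per page to force it in,
        // mirroring the force branch of manage_graph_tensors.
        posix_madvise(addr, len, POSIX_MADV_WILLNEED);
        volatile char sink = 0;
        for (size_t off = 0; off < len; off += (size_t) page_size) {
            sink = ((volatile char *) addr)[off];
        }
        (void) sink;

        // Check how much of the mapping is resident, as is_tensor_loaded does per tensor.
        unsigned char * vec = new unsigned char[pages];
        size_t resident = 0;
        if (mincore(addr, len, vec) == 0) {
            for (size_t i = 0; i < pages; i++) resident += vec[i] & 1;
        }
        printf("%zu of %zu pages resident\n", resident, pages);
        delete[] vec;

        // Drop the pages again, mirroring the unload path.
        posix_madvise(addr, len, POSIX_MADV_DONTNEED);

        munmap(addr, len);
        close(fd);
        return 0;
    }
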
@@ -17533,6 +17638,8 @@ static int llama_decode_internal(
         ggml_backend_sched_alloc_graph(lctx.sched[i], gf[i]);
     }

+    ggml_cgraph * sub_gf = nullptr;
+    ggml_cgraph * next_gf = nullptr;
     const uint32_t n_layer = hparams.n_layer;
     const char * layer_str = nullptr;
     int cur_l = -1;

@@ -17541,7 +17648,8 @@ static int llama_decode_internal(
     GGML_ASSERT(my_rank == 0 || n_world > 1);

     for (size_t i = 0; i < (size_t)gf.size(); ++i) {
-        ggml_cgraph * sub_gf = gf[i];
+        sub_gf = gf[i];
+        next_gf = gf[(i + 1) % gf.size()];

         if (n_world > 1 && !(my_rank == 0 && i == 0) && !(my_rank == 0 && is_last_l)) {
             { // receive data from previous nodes

@@ -17600,6 +17708,17 @@ static int llama_decode_internal(
             zmq::socket_t * s = is_to_master ? lctx.master_socket : lctx.send_socket;
             llama_send_tensors(*s, &tensors);
         }
+
+        // overlap memory scheduling with other nodes' communication and computing
+        if (cparams.unload) {
+            timer(manage_graph_tensors);
+            if (n_world != 1) {
+                manage_graph_tensors(sub_gf, POSIX_MADV_DONTNEED);
+                if (!(my_rank == 0 && is_last_l)) {
+                    manage_graph_tensors(next_gf, POSIX_MADV_WILLNEED, true);
+                }
+            }
+        }
     }

     // update the kv ring buffer

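The block above is the heart of the overlap: after a node finishes a sub-graph and ships the activations, it drops the pages it just used and warms up the pages it will need on the next pass, while the other nodes are still computing or communicating. A detached sketch of that round-robin schedule (types and the compute/communication steps are placeholders, not prima.cpp APIs):

    #include <stddef.h>
    #include <sys/mman.h>
    #include <vector>

    // Placeholder view of one sub-graph's weights: a page-aligned mmap'd region.
    struct weight_region { void * addr; size_t len; };

    static void compute_subgraph(size_t /*i*/)     { /* placeholder: run sub-graph i */ }
    static void exchange_activations(size_t /*i*/) { /* placeholder: send/recv activations */ }

    // Round-robin over the sub-graphs assigned to this node: evict what was just
    // used, prefetch what comes next, and let the kernel overlap the disk I/O
    // with the other nodes' work.
    static void pipeline_pass(const std::vector<weight_region> & w) {
        for (size_t i = 0; i < w.size(); ++i) {
            compute_subgraph(i);
            exchange_activations(i);

            const weight_region & cur  = w[i];
            const weight_region & next = w[(i + 1) % w.size()];
            posix_madvise(cur.addr,  cur.len,  POSIX_MADV_DONTNEED); // unload used weights
            posix_madvise(next.addr, next.len, POSIX_MADV_WILLNEED); // prefetch upcoming weights
        }
    }
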
@@ -19314,6 +19433,7 @@ struct llama_context_params llama_context_default_params() {
         /*.n_world        =*/ 1,
         /*.rank           =*/ 0,
         /*.n_layer_window =*/ 32,
+        /*.unload         =*/ false,
         /*.master_ip      =*/ nullptr,
         /*.next_node_ip   =*/ nullptr,
         /*.n_ctx          =*/ 512,

@@ -19617,6 +19737,7 @@ struct llama_context * llama_new_context_with_model(
     cparams.n_world         = params.n_world;
     cparams.rank            = params.rank;
     cparams.n_layer_window  = params.n_layer_window;
+    cparams.unload          = params.unload;
     cparams.n_seq_max       = std::max(1u, params.n_seq_max);
     cparams.n_threads       = params.n_threads;
     cparams.n_threads_batch = params.n_threads_batch;

@@ -19996,6 +20117,9 @@ struct llama_context * llama_new_context_with_model(

         ctx->sched.resize(gf.size());

+        // prefetch the first subgraph weights
+        manage_graph_tensors(gf.front(), POSIX_MADV_WILLNEED, true);
+
         // initialize scheduler with the worst-case graph
         bool ok = true;
         GGML_ASSERT(ctx->sched.size() == gf.size());