mirror of
https://github.com/Lizonghang/prima.cpp.git
synced 2025-09-10 05:14:38 +00:00
add mmap prefetch and unloading
This commit is contained in:
parent
ba5117581e
commit
c97ea10617
7 changed files with 161 additions and 11 deletions
|
@@ -696,6 +696,13 @@ gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex,
|
|||
params.next_node_ip = value;
|
||||
}
|
||||
).set_env("LLAMA_ARG_NEXT_NODE_IP"));
|
||||
add_opt(llama_arg(
|
||||
{"--unload", "--unload-weight"},
|
||||
format("whether to unload layer weights after use (default: %s)", params.unload ? "true" : "false"),
|
||||
[](gpt_params & params) {
|
||||
params.unload = true;
|
||||
}
|
||||
).set_env("LLAMA_ARG_UNLOAD"));
|
||||
add_opt(llama_arg(
|
||||
{"-n", "--predict", "--n-predict"}, "N",
|
||||
format("number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)", params.n_predict),
|
||||
|
|
|
@@ -1039,6 +1039,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
|
|||
cparams.n_world = params.n_world;
|
||||
cparams.rank = params.rank;
|
||||
cparams.n_layer_window = params.n_layer_window;
|
||||
cparams.unload = params.unload;
|
||||
|
||||
if (cparams.master_ip != nullptr) {
|
||||
delete[] cparams.master_ip;
|
||||
|
|
|
@@ -147,6 +147,7 @@ struct gpt_params {
|
|||
int32_t n_layer_window = 32; // number of layers to process in each compute
|
||||
std::string master_ip = "localhost"; // ip address of the master node
|
||||
std::string next_node_ip = "localhost"; // ip address of my next node
|
||||
bool unload = false; // unload layer weights after use or not
|
||||
int32_t n_predict = -1; // new tokens to predict
|
||||
int32_t n_ctx = 0; // context size
|
||||
int32_t n_batch = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue