Mirror of https://github.com/Lizonghang/prima.cpp.git (synced 2025-09-09 10:14:36 +00:00)

commit 1ca9a43bd1 (parent f3dd5776eb)

    keep the output layer weights in shared memory by default

4 changed files with 16 additions and 16 deletions
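Net effect of the hunks below: the keep_out_in_metal default flips from false to true in gpt_params, llama_model_default_params(), and llama_context_default_params(), and the --keep-out-in-metal CLI option (with its LLAMA_ARG_KEEP_INP_OUT_IN_METAL environment variable) is commented out. On Metal builds the output layer weights therefore stay in shared memory unless a caller clears the field in code. A minimal opt-out sketch, using only names that appear verbatim in the hunks (the include path and the trailing context-creation step are assumptions):

    #include "llama.h"  // assumed public header declaring llama_context_default_params()

    int main(void) {
        // After this commit, keep_out_in_metal defaults to true here.
        struct llama_context_params cparams = llama_context_default_params();

        // With the CLI flag commented out, opting out is a code-level
        // decision: clear the field before creating the context.
        cparams.keep_out_in_metal = false;

        // ... create the model and context with cparams as usual ...
        return 0;
    }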
@@ -737,15 +737,15 @@ gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex,
             params.gpu_mem = value; // in GiB
         }
     ).set_env("LLAMA_ARG_CUDA_MEM"));
-#ifdef GGML_USE_METAL
-    add_opt(llama_arg(
-        {"--keep-out-in-metal"},
-        format("whether to keep output weights in metal memory (default: %s)", params.keep_out_in_metal ? "true" : "false"),
-        [](gpt_params & params) {
-            params.keep_out_in_metal = true;
-        }
-    ).set_env("LLAMA_ARG_KEEP_INP_OUT_IN_METAL"));
-#endif
+// #ifdef GGML_USE_METAL
+//     add_opt(llama_arg(
+//         {"--keep-out-in-metal"},
+//         format("whether to keep output weights in metal memory (default: %s)", params.keep_out_in_metal ? "true" : "false"),
+//         [](gpt_params & params) {
+//             params.keep_out_in_metal = true;
+//         }
+//     ).set_env("LLAMA_ARG_KEEP_INP_OUT_IN_METAL"));
+// #endif
     add_opt(llama_arg(
         {"-n", "--predict", "--n-predict"}, "N",
         format("number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)", params.n_predict),
@@ -1691,11 +1691,11 @@ static ggml_type kv_cache_type_from_str(const std::string & s) {
 struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params) {
     auto cparams = llama_context_default_params();
 
     cparams.n_world = params.n_world;
     cparams.rank = params.rank;
     cparams.unload = params.unload;
     cparams.keep_out_in_metal = params.keep_out_in_metal;
     cparams.n_gpu_layers = params.n_gpu_layers;
     std::copy(std::begin(params.n_layer_window), std::end(params.n_layer_window), cparams.n_layer_window);
 
     if (cparams.master_ip != nullptr) {
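Note that llama_context_params_from_gpt_params() above still copies params.keep_out_in_metal through unchanged, so tools built on the common layer inherit the new gpt_params default of true rather than reading it from the command line. A sketch of the same opt-out at the gpt_params level, assuming only the field and function names shown in the hunks (the common.h include path is an assumption):

    #include "common.h"  // assumed header declaring gpt_params and
                         // llama_context_params_from_gpt_params()

    void configure(gpt_params & params) {
        // The default is now true; flip it here, since --keep-out-in-metal
        // no longer exists as a CLI flag after this commit.
        params.keep_out_in_metal = false;

        struct llama_context_params cparams = llama_context_params_from_gpt_params(params);
        // cparams.keep_out_in_metal is now false, via the copy shown above.
        (void) cparams;
    }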
@@ -148,7 +148,7 @@ struct gpt_params {
     std::string master_ip = "localhost"; // ip address of the master node
     std::string next_node_ip = "localhost"; // ip address of my next node
     bool unload = false; // unload layer weights after use or not
-    bool keep_out_in_metal = false; // whether to keep output weights in metal memory, not by default
+    bool keep_out_in_metal = true; // whether to keep output weights in metal memory, true by default
     int32_t gpu_mem = 999.0; // gpu memory to use, in GiB
     int32_t n_predict = -1; // new tokens to predict
     int32_t n_ctx = 0; // context size
@@ -19781,7 +19781,7 @@ struct llama_model_params llama_model_default_params() {
         /*.use_mmap =*/ true,
         /*.use_mlock =*/ false,
         /*.check_tensors =*/ false,
-        /*.keep_out_in_metal =*/ false,
+        /*.keep_out_in_metal =*/ true,
     };
 
 #ifdef GGML_USE_METAL
@@ -19799,7 +19799,7 @@ struct llama_context_params llama_context_default_params() {
         /*.n_layer_window =*/ {32},
         /*.n_gpu_layers =*/ 0,
         /*.unload =*/ false,
-        /*.keep_out_in_metal =*/ false,
+        /*.keep_out_in_metal =*/ true,
         /*.master_ip =*/ nullptr,
         /*.next_node_ip =*/ nullptr,
         /*.n_ctx =*/ 512,