From 1ca9a43bd130c3f4fe1b69d8e87856a3d0b36710 Mon Sep 17 00:00:00 2001
From: Lizonghang <870644199@qq.com>
Date: Sat, 25 Jan 2025 23:31:43 +0400
Subject: [PATCH] keep the output layer weights in shared memory by default

---
 common/arg.cpp    | 18 +++++++++---------
 common/common.cpp |  8 ++++----
 common/common.h   |  2 +-
 src/llama.cpp     |  4 ++--
 4 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/common/arg.cpp b/common/arg.cpp
index e189b5ce..1cab211d 100644
--- a/common/arg.cpp
+++ b/common/arg.cpp
@@ -737,15 +737,15 @@ gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex,
             params.gpu_mem = value; // in GiB
         }
     ).set_env("LLAMA_ARG_CUDA_MEM"));
-#ifdef GGML_USE_METAL
-    add_opt(llama_arg(
-        {"--keep-out-in-metal"},
-        format("whether to keep output weights in metal memory (default: %s)", params.keep_out_in_metal ? "true" : "false"),
-        [](gpt_params & params) {
-            params.keep_out_in_metal = true;
-        }
-    ).set_env("LLAMA_ARG_KEEP_INP_OUT_IN_METAL"));
-#endif
+// #ifdef GGML_USE_METAL
+//     add_opt(llama_arg(
+//         {"--keep-out-in-metal"},
+//         format("whether to keep output weights in metal memory (default: %s)", params.keep_out_in_metal ? "true" : "false"),
+//         [](gpt_params & params) {
+//             params.keep_out_in_metal = true;
+//         }
+//     ).set_env("LLAMA_ARG_KEEP_INP_OUT_IN_METAL"));
+// #endif
     add_opt(llama_arg(
         {"-n", "--predict", "--n-predict"}, "N",
         format("number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)", params.n_predict),
diff --git a/common/common.cpp b/common/common.cpp
index d5dcf2af..e737a4f3 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1691,11 +1691,11 @@ static ggml_type kv_cache_type_from_str(const std::string & s) {
 struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params) {
     auto cparams = llama_context_default_params();
 
-    cparams.n_world      = params.n_world;
-    cparams.rank         = params.rank;
-    cparams.unload       = params.unload;
+    cparams.n_world           = params.n_world;
+    cparams.rank              = params.rank;
+    cparams.unload            = params.unload;
     cparams.keep_out_in_metal = params.keep_out_in_metal;
-    cparams.n_gpu_layers = params.n_gpu_layers;
+    cparams.n_gpu_layers      = params.n_gpu_layers;
     std::copy(std::begin(params.n_layer_window), std::end(params.n_layer_window), cparams.n_layer_window);
 
     if (cparams.master_ip != nullptr) {
diff --git a/common/common.h b/common/common.h
index fece83f3..9ac200c1 100644
--- a/common/common.h
+++ b/common/common.h
@@ -148,7 +148,7 @@ struct gpt_params {
     std::string master_ip    = "localhost"; // ip address of the master node
     std::string next_node_ip = "localhost"; // ip address of my next node
     bool unload              = false;       // unload layer weights after use or not
-    bool keep_out_in_metal   = false;       // whether to keep output weights in metal memory, not by default
+    bool keep_out_in_metal   = true;        // whether to keep output weights in metal memory, true by default
     int32_t gpu_mem          = 999.0;       // gpu memory to use, in GiB
     int32_t n_predict        = -1;          // new tokens to predict
     int32_t n_ctx            = 0;           // context size
diff --git a/src/llama.cpp b/src/llama.cpp
index d4cecf8f..5b32ce90 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -19781,7 +19781,7 @@ struct llama_model_params llama_model_default_params() {
         /*.use_mmap                    =*/ true,
         /*.use_mlock                   =*/ false,
         /*.check_tensors               =*/ false,
-        /*.keep_out_in_metal           =*/ false,
+        /*.keep_out_in_metal           =*/ true,
     };
 
 #ifdef GGML_USE_METAL
@@ -19799,7 +19799,7 @@ struct llama_context_params llama_context_default_params() {
         /*.n_layer_window              =*/ {32},
         /*.n_gpu_layers                =*/ 0,
         /*.unload                      =*/ false,
-        /*.keep_out_in_metal           =*/ false,
+        /*.keep_out_in_metal           =*/ true,
         /*.master_ip                   =*/ nullptr,
         /*.next_node_ip                =*/ nullptr,
         /*.n_ctx                       =*/ 512,