keep the output layer weights in shared memory by default

Lizonghang 2025-01-25 23:31:43 +04:00
parent f3dd5776eb
commit 1ca9a43bd1
4 changed files with 16 additions and 16 deletions

@@ -737,15 +737,15 @@ gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex,
 params.gpu_mem = value; // in GiB
 }
 ).set_env("LLAMA_ARG_CUDA_MEM"));
-#ifdef GGML_USE_METAL
-add_opt(llama_arg(
-{"--keep-out-in-metal"},
-format("whether to keep output weights in metal memory (default: %s)", params.keep_out_in_metal ? "true" : "false"),
-[](gpt_params & params) {
-params.keep_out_in_metal = true;
-}
-).set_env("LLAMA_ARG_KEEP_INP_OUT_IN_METAL"));
-#endif
+// #ifdef GGML_USE_METAL
+// add_opt(llama_arg(
+// {"--keep-out-in-metal"},
+// format("whether to keep output weights in metal memory (default: %s)", params.keep_out_in_metal ? "true" : "false"),
+// [](gpt_params & params) {
+// params.keep_out_in_metal = true;
+// }
+// ).set_env("LLAMA_ARG_KEEP_INP_OUT_IN_METAL"));
+// #endif
 add_opt(llama_arg(
 {"-n", "--predict", "--n-predict"}, "N",
 format("number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)", params.n_predict),

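Note (not part of the diff): the --keep-out-in-metal registration above is commented out rather than deleted, since the behavior it enabled is now the default. If an explicit opt-out were wanted later, a hypothetical registration following the same add_opt/llama_arg pattern could look like the sketch below; the flag name --no-keep-out-in-metal and the env var LLAMA_ARG_NO_KEEP_OUT_IN_METAL are illustrative only, not part of this commit.

// hypothetical opt-out flag, mirroring the commented-out block above
add_opt(llama_arg(
    {"--no-keep-out-in-metal"},
    format("do not keep output weights in metal memory (current default: %s)", params.keep_out_in_metal ? "true" : "false"),
    [](gpt_params & params) {
        params.keep_out_in_metal = false;
    }
).set_env("LLAMA_ARG_NO_KEEP_OUT_IN_METAL"));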
View file

@@ -1691,11 +1691,11 @@ static ggml_type kv_cache_type_from_str(const std::string & s) {
 struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params) {
 auto cparams = llama_context_default_params();
-cparams.n_world = params.n_world;
-cparams.rank = params.rank;
-cparams.unload = params.unload;
+cparams.n_world           = params.n_world;
+cparams.rank              = params.rank;
+cparams.unload            = params.unload;
 cparams.keep_out_in_metal = params.keep_out_in_metal;
-cparams.n_gpu_layers = params.n_gpu_layers;
+cparams.n_gpu_layers      = params.n_gpu_layers;
 std::copy(std::begin(params.n_layer_window), std::end(params.n_layer_window), cparams.n_layer_window);
 if (cparams.master_ip != nullptr) {

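A rough usage sketch (not from the diff) of what the hunk above means for callers: the forwarding of keep_out_in_metal, combined with the new true default, makes contexts keep the output weights in Metal memory unless the caller clears the flag first. It assumes the usual layout where gpt_params and llama_context_params_from_gpt_params are declared in common/common.h.

#include "common.h"  // gpt_params, llama_context_params_from_gpt_params (assumed header)

int main() {
    gpt_params params;  // keep_out_in_metal now starts out true
    // params.keep_out_in_metal = false;  // illustrative programmatic opt-out
    llama_context_params cparams = llama_context_params_from_gpt_params(params);
    return cparams.keep_out_in_metal ? 0 : 1;  // expected: 0 with the new default
}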
@@ -148,7 +148,7 @@ struct gpt_params {
 std::string master_ip = "localhost"; // ip address of the master node
 std::string next_node_ip = "localhost"; // ip address of my next node
 bool unload = false; // unload layer weights after use or not
-bool keep_out_in_metal = false; // whether to keep output weights in metal memory, not by default
+bool keep_out_in_metal = true; // whether to keep output weights in metal memory, true by default
 int32_t gpu_mem = 999.0; // gpu memory to use, in GiB
 int32_t n_predict = -1; // new tokens to predict
 int32_t n_ctx = 0; // context size

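The header change above is what flips the compiled-in default for the command-line tools. A minimal sketch of the effect on client code (the struct and field come from the diff; the opt-out assignment is illustrative only):

#include <cassert>
#include "common.h"  // gpt_params (assumed header)

int main() {
    gpt_params params;
    assert(params.keep_out_in_metal);  // new compiled-in default from this header
    params.keep_out_in_metal = false;  // illustrative: with the CLI flag commented out,
                                       // this is the remaining way to opt out
    return 0;
}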
@@ -19781,7 +19781,7 @@ struct llama_model_params llama_model_default_params() {
 /*.use_mmap =*/ true,
 /*.use_mlock =*/ false,
 /*.check_tensors =*/ false,
-/*.keep_out_in_metal =*/ false,
+/*.keep_out_in_metal =*/ true,
 };
 #ifdef GGML_USE_METAL
@@ -19799,7 +19799,7 @@ struct llama_context_params llama_context_default_params() {
 /*.n_layer_window =*/ {32},
 /*.n_gpu_layers =*/ 0,
 /*.unload =*/ false,
-/*.keep_out_in_metal =*/ false,
+/*.keep_out_in_metal =*/ true,
 /*.master_ip =*/ nullptr,
 /*.next_node_ip =*/ nullptr,
 /*.n_ctx =*/ 512,
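Both default-parameter helpers in llama.cpp now report the feature as enabled. A small sketch, assuming the usual llama.h declarations of these two functions and the keep_out_in_metal fields shown above:

#include "llama.h"

int main() {
    llama_model_params mparams = llama_model_default_params();      // .keep_out_in_metal == true
    llama_context_params cparams = llama_context_default_params();  // .keep_out_in_metal == true
    return (mparams.keep_out_in_metal && cparams.keep_out_in_metal) ? 0 : 1;
}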