From 33429ec4e1f49ac32a9ab5a0e1bf4ec628ecb0f0 Mon Sep 17 00:00:00 2001
From: Zonghang Li
Date: Wed, 22 Jan 2025 11:25:09 +0400
Subject: [PATCH] add option --keep-inp-out-in-metal

---
 common/arg.cpp | 3 ++-
 src/llama.cpp  | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/common/arg.cpp b/common/arg.cpp
index 602ad9f3..33464eaa 100644
--- a/common/arg.cpp
+++ b/common/arg.cpp
@@ -737,7 +737,7 @@ gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex,
             params.cuda_mem = value; // in GiB
         }
     ).set_env("LLAMA_ARG_CUDA_MEM"));
-    // "--keep-inp-out-in-metal" is a temporary option to keep the input and output in metal
+#ifdef GGML_USE_METAL
     add_opt(llama_arg(
         {"--keep-inp-out-in-metal"},
         format("whether to keep input and output weight in metal (default: %s)", params.keep_inp_out_in_metal ? "true" : "false"),
@@ -745,6 +745,7 @@ gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex,
             params.keep_inp_out_in_metal = true;
         }
     ).set_env("LLAMA_ARG_KEEP_INP_OUT_IN_METAL"));
+#endif
     add_opt(llama_arg(
         {"-n", "--predict", "--n-predict"}, "N",
         format("number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)", params.n_predict),
diff --git a/src/llama.cpp b/src/llama.cpp
index 6a94f017..36eb971d 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -19773,6 +19773,7 @@ struct llama_model_params llama_model_default_params() {
         /*.use_mmap                   =*/ true,
         /*.use_mlock                  =*/ false,
         /*.check_tensors              =*/ false,
+        /*.keep_inp_out_in_metal      =*/ false,
     };

 #ifdef GGML_USE_METAL
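
Below is a minimal sketch of how a caller could consume the new
keep_inp_out_in_metal field, assuming the struct member is exposed through
llama.h on this branch (the patch itself only shows its default initializer
in llama_model_default_params). llama_model_default_params,
llama_load_model_from_file, and llama_free_model are the stock llama.cpp
entry points of this vintage; everything else here is illustrative, not part
of the patch.

    // sketch.cpp -- illustrative only; assumes the llama.h API on this branch
    #include "llama.h"

    int main(int argc, char ** argv) {
        if (argc < 2) {
            return 1; // usage: ./sketch <model.gguf>
        }

        llama_model_params mparams = llama_model_default_params();

        // Keep the input and output weight tensors in the Metal buffer.
        // The patch defaults this field to false and only registers the
        // --keep-inp-out-in-metal flag when built with GGML_USE_METAL.
        mparams.keep_inp_out_in_metal = true;

        llama_model * model = llama_load_model_from_file(argv[1], mparams);
        if (model == NULL) {
            return 1;
        }

        llama_free_model(model);
        return 0;
    }

On the command line, the same effect would come from passing
--keep-inp-out-in-metal to a Metal build, or from setting the
LLAMA_ARG_KEEP_INP_OUT_IN_METAL environment variable registered above.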