mirror of https://github.com/Lizonghang/prima.cpp.git
synced 2025-09-09 18:44:35 +00:00
add args -k and --force
parent 9cbdf01645
commit bcfdace59b
5 changed files with 27 additions and 5 deletions
@@ -737,6 +737,20 @@ gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex,
             params.gpu_mem = value; // in GiB
         }
     ).set_env("LLAMA_ARG_CUDA_MEM"));
+    add_opt(llama_arg(
+        {"-k", "--n-cycles"}, "N",
+        format("number of cycles to output one token (default: %d)", params.n_cycles),
+        [](gpt_params & params, int value) {
+            params.n_cycles = value;
+        }
+    ).set_env("LLAMA_ARG_K"));
+    add_opt(llama_arg(
+        {"--force"},
+        format("force to start prefetching after computation (default: %s)", params.force ? "true" : "false"),
+        [](gpt_params & params) {
+            params.force = true;
+        }
+    ).set_env("LLAMA_ARG_FORCE"));
     // #ifdef GGML_USE_METAL
     // // warn: if the output layer weights are not kept in metal shared memory, its mmap-ed weight data
     // // could be released by the OS and reloaded repeatedly, which causes additional disk I/O latency.
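Both new options follow the same registration pattern as the surrounding code: a list of flag spellings, a help string built with format(), a handler lambda that writes into gpt_params, and an environment-variable fallback attached via set_env. The sketch below is a minimal, self-contained illustration of that pattern, not prima.cpp's actual llama_arg/add_opt implementation; the cli_arg struct, the parsing loop, and the gpt_params defaults shown here are assumptions made for demonstration.

#include <cstdio>
#include <cstdlib>
#include <functional>
#include <string>
#include <vector>

// Hypothetical stand-in for the fields the commit touches; the defaults
// here are assumed for illustration, not taken from prima.cpp.
struct gpt_params {
    int  n_cycles = 1;      // cycles needed to output one token (-k / --n-cycles)
    bool force    = false;  // start prefetching right after computation (--force)
};

// Hypothetical stand-in for llama_arg: flag spellings, whether a value
// follows, a handler that writes into gpt_params, and an env fallback.
struct cli_arg {
    std::vector<std::string> flags;
    bool takes_value;
    std::function<void(gpt_params &, const char *)> handler;
    std::string env;
};

int main(int argc, char ** argv) {
    gpt_params params;

    const std::vector<cli_arg> args = {
        { {"-k", "--n-cycles"}, true,
          [](gpt_params & p, const char * v) { p.n_cycles = std::atoi(v); },
          "LLAMA_ARG_K" },
        { {"--force"}, false,
          [](gpt_params & p, const char *) { p.force = true; },
          "LLAMA_ARG_FORCE" },
    };

    // Environment variables are applied first, so they act as defaults ...
    for (const auto & a : args) {
        if (const char * v = std::getenv(a.env.c_str())) {
            a.handler(params, v);
        }
    }

    // ... and explicit command-line flags override them.
    for (int i = 1; i < argc; i++) {
        const std::string cur = argv[i];
        for (const auto & a : args) {
            bool match = false;
            for (const auto & f : a.flags) {
                if (f == cur) { match = true; break; }
            }
            if (!match) continue;
            const char * v = nullptr;
            if (a.takes_value && i + 1 < argc) {
                v = argv[++i];   // consume the value that follows the flag
            }
            a.handler(params, v);
            break;
        }
    }

    std::printf("n_cycles=%d force=%s\n", params.n_cycles, params.force ? "true" : "false");
    return 0;
}

Under this precedence, an exported LLAMA_ARG_K=2 acts as a default for -k that an explicit -k 4 on the command line overrides; --force and LLAMA_ARG_FORCE behave the same way, except the handler takes no value and simply sets the flag.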