Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-10 17:14:36 +00:00)
Merge branch 'upstream' into concedo_experimental
# Conflicts:
#	examples/run/run.cpp
#	ggml/src/ggml-cann/aclnn_ops.cpp
Commit 67851e5415: 9 changed files with 39 additions and 8 deletions
@@ -1037,6 +1037,8 @@ struct common_init_result common_init_from_params(common_params & params) {
     if (params.warmup) {
         LOG_WRN("%s: warming up the model with an empty run - please wait ... (--no-warmup to disable)\n", __func__);
 
+        llama_set_warmup(lctx, true);
+
         std::vector<llama_token> tmp;
         llama_token bos = llama_vocab_bos(vocab);
         llama_token eos = llama_vocab_eos(vocab);
@@ -1067,6 +1069,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         llama_kv_self_clear(lctx);
         llama_synchronize(lctx);
         llama_perf_context_reset(lctx);
+        llama_set_warmup(lctx, false);
     }
 
     iparams.model.reset(model);
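For context, the two hunks bracket the existing warmup decode with llama_set_warmup(lctx, true/false), so the backend knows the run is a throwaway. Below is a minimal sketch of the resulting flow, assuming an already-initialized llama_context (lctx) and llama_vocab (vocab); the helper name warmup_run and the n_batch parameter are illustrative only, since upstream keeps this logic inline in common_init_from_params.

// Minimal sketch, assuming llama.h from the same llama.cpp revision.
// `warmup_run` is a hypothetical helper, not part of the upstream code.
#include "llama.h"

#include <algorithm>
#include <vector>

static void warmup_run(llama_context * lctx, const llama_vocab * vocab, int32_t n_batch) {
    llama_set_warmup(lctx, true);                 // put the context in warmup mode for the throwaway run

    std::vector<llama_token> tmp;
    llama_token bos = llama_vocab_bos(vocab);
    llama_token eos = llama_vocab_eos(vocab);
    if (bos != LLAMA_TOKEN_NULL) { tmp.push_back(bos); }
    if (eos != LLAMA_TOKEN_NULL) { tmp.push_back(eos); }
    if (tmp.empty())             { tmp.push_back(0); }

    // decode a tiny dummy batch so the backend allocates buffers and builds its graphs
    llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min((int32_t) tmp.size(), n_batch)));

    llama_kv_self_clear(lctx);       // drop the warmup tokens from the KV cache
    llama_synchronize(lctx);         // wait for the backend to finish
    llama_perf_context_reset(lctx);  // exclude the warmup from perf counters
    llama_set_warmup(lctx, false);   // back to normal decoding
}

The real code in common_init_from_params additionally handles encoder-only and encoder-decoder models; the sketch above omits that branch.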