Mirror of https://github.com/LostRuins/koboldcpp.git, synced 2025-09-11 01:24:36 +00:00
Merged the upstream updates for model loading code, and ditched the legacy llama loaders since they were no longer needed.
Commit f53238f570
20 changed files with 1234 additions and 1446 deletions
@@ -42,17 +42,13 @@ bool llama_load_model(const load_model_inputs inputs, FileFormat in_file_format)
     ctx_params.seed = -1;
     ctx_params.f16_kv = inputs.f16_kv;
     ctx_params.logits_all = false;
     ctx_params.use_mmap = inputs.use_mmap;

     file_format = in_file_format;

-    if (file_format == FileFormat::GGML || file_format == FileFormat::GGHF)
-    {
-        ctx = legacy_llama_init_from_file(modelname.c_str(), ctx_params);
-    }
-    else
-    {
-        ctx = llama_init_from_file(modelname.c_str(), ctx_params);
-    }
+    ctx = llama_init_from_file(modelname.c_str(), ctx_params);

     if (ctx == NULL)
     {
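
Below is a rough, self-contained sketch of what the simplified loading path looks like after this change. It is an illustration based on the hunk above, not a verbatim copy of the koboldcpp source: the load_model_inputs / FileFormat types, the modelname and ctx globals, and the included headers are assumed from the surrounding file, and error handling is simplified.

// Illustration only: assumes the old llama.cpp C API (llama_init_from_file,
// llama_context_default_params) and koboldcpp's own load_model_inputs /
// FileFormat definitions; not the actual repository code.
#include <cstdio>
#include <string>
#include "llama.h"            // assumed: llama.cpp C API of this era
#include "model_adapter.h"    // assumed: FileFormat, load_model_inputs

static llama_context *ctx = nullptr;   // assumed globals, as in the hunk above
static std::string modelname;
static FileFormat file_format;

bool llama_load_model(const load_model_inputs inputs, FileFormat in_file_format)
{
    llama_context_params ctx_params = llama_context_default_params();
    ctx_params.seed       = -1;
    ctx_params.f16_kv     = inputs.f16_kv;
    ctx_params.logits_all = false;
    ctx_params.use_mmap   = inputs.use_mmap;

    file_format = in_file_format;

    // One loader for every format: the FileFormat::GGML / FileFormat::GGHF
    // branch that called legacy_llama_init_from_file was removed in this commit.
    ctx = llama_init_from_file(modelname.c_str(), ctx_params);

    if (ctx == NULL)
    {
        fprintf(stderr, "llama_load_model: failed to load '%s'\n", modelname.c_str());
        return false;
    }
    return true;
}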