Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-11 01:24:36 +00:00)
Merge branch 'upstream' into concedo_experimental
# Conflicts:
#	.devops/full-cuda.Dockerfile
#	.devops/full-rocm.Dockerfile
#	.devops/full.Dockerfile
#	.devops/llama-cli-cuda.Dockerfile
#	.devops/llama-cli-intel.Dockerfile
#	.devops/llama-cli-rocm.Dockerfile
#	.devops/llama-cli-vulkan.Dockerfile
#	.devops/llama-cli.Dockerfile
#	.devops/llama-server-cuda.Dockerfile
#	.devops/llama-server-intel.Dockerfile
#	.devops/llama-server-rocm.Dockerfile
#	.devops/llama-server-vulkan.Dockerfile
#	.devops/llama-server.Dockerfile
#	CMakeLists.txt
#	CONTRIBUTING.md
#	Makefile
#	ggml/CMakeLists.txt
#	ggml/src/CMakeLists.txt
#	requirements.txt
#	src/llama.cpp
#	tests/test-backend-ops.cpp
Commit 24b9616344
61 changed files with 12994 additions and 936 deletions
@@ -1443,16 +1443,12 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
             lora_base_arg = lora_base.c_str();
         }

-        int err = llama_model_apply_lora_from_file(llamamodel,
-                                                   lora_filename.c_str(),
-                                                   1.0f,
-                                                   lora_base_arg,
-                                                   kcpp_params->n_threads);
-        if (err != 0)
-        {
+        auto adapter = llama_lora_adapter_init(llamamodel, lora_filename.c_str());
+        if (adapter == nullptr) {
             fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__);
             return ModelLoadResult::FAIL;
         }
+        llama_lora_adapter_set(llama_ctx_v4, adapter, 1.0f);
     }

     if(mmproj_filename != "" && file_format==FileFormat::GGUF_GENERIC)
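The hunk above tracks upstream llama.cpp dropping llama_model_apply_lora_from_file in favor of the llama_lora_adapter API: the adapter is loaded once against the model with llama_lora_adapter_init, then attached to a context with a scale via llama_lora_adapter_set. Below is a minimal standalone sketch of that flow, assuming the llama.h API of this period; the file paths, the 1.0f scale, and the surrounding model/context setup are illustrative placeholders rather than code from this repository.

// Sketch of loading a model and applying a LoRA adapter with the post-merge API.
// Paths ("model.gguf", "lora.gguf") are placeholders.
#include "llama.h"
#include <cstdio>

int main() {
    llama_backend_init();

    // Load the base model.
    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == nullptr) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    // Create a context for inference.
    llama_context_params cparams = llama_context_default_params();
    llama_context * ctx = llama_new_context_with_model(model, cparams);

    // New API: the adapter is initialized once against the model...
    llama_lora_adapter * adapter = llama_lora_adapter_init(model, "lora.gguf");
    if (adapter == nullptr) {
        fprintf(stderr, "failed to load lora adapter\n");
        return 1;
    }
    // ...and then attached to a context with a scale (1.0f = full strength).
    llama_lora_adapter_set(ctx, adapter, 1.0f);

    // ... run inference as usual ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}

In KoboldCpp's gpttype_load_model, the same two calls replace the removed single-shot llama_model_apply_lora_from_file, with the adapter attached to the existing GGUF context (llama_ctx_v4).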