Merge branch 'upstream' into concedo_experimental

# Conflicts:
#	.devops/llama-cli-intel.Dockerfile
#	.devops/llama-server-intel.Dockerfile
#	README.md
#	ggml/src/CMakeLists.txt
#	tests/test-chat-template.cpp
commit cca2fa9a6c
Author: Concedo
Date:   2024-07-24 21:57:50 +08:00

4 changed files with 11 additions and 2 deletions


@@ -529,12 +529,16 @@ extern "C" {
             struct llama_lora_adapter * adapter,
             float scale);
 
-    // Remove a LoRA adapter from given context
+    // Remove a specific LoRA adapter from given context
     // Return -1 if the adapter is not present in the context
     LLAMA_API int32_t llama_lora_adapter_remove(
             struct llama_context * ctx,
             struct llama_lora_adapter * adapter);
 
+    // Remove all LoRA adapters from given context
+    LLAMA_API void llama_lora_adapter_clear(
+            struct llama_context * ctx);
+
     // Manually free a LoRA adapter
     // Note: loaded adapters will be free when the associated model is deleted
     LLAMA_API void llama_lora_adapter_free(struct llama_lora_adapter * adapter);
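Below is a minimal sketch of how the adapter calls in this hunk might be combined with the rest of the llama.cpp C API of that period (llama_lora_adapter_init / _set were already present above the hunk). The file paths "model.gguf" and "lora.gguf" are placeholders, not files from this commit, and the surrounding setup/teardown calls are assumptions about the caller's code rather than part of this change.

// Sketch only: exercises the adapter API shown in the hunk above.
// "model.gguf" and "lora.gguf" are placeholder paths, not from this commit.
#include <stdio.h>
#include "llama.h"

int main(void) {
    llama_backend_init();

    struct llama_model   * model = llama_load_model_from_file("model.gguf", llama_model_default_params());
    struct llama_context * ctx   = llama_new_context_with_model(model, llama_context_default_params());
    if (model == NULL || ctx == NULL) {
        fprintf(stderr, "failed to load model or create context\n");
        return 1;
    }

    // Load a LoRA adapter from disk and attach it to the context at full scale.
    struct llama_lora_adapter * adapter = llama_lora_adapter_init(model, "lora.gguf");
    llama_lora_adapter_set(ctx, adapter, 1.0f);

    // Detach that specific adapter; returns -1 if it was not attached to ctx.
    if (llama_lora_adapter_remove(ctx, adapter) == -1) {
        fprintf(stderr, "adapter was not present in the context\n");
    }

    // Or drop every attached adapter at once (the new call added in this hunk).
    llama_lora_adapter_clear(ctx);

    // Freeing the adapter explicitly is optional: loaded adapters are also
    // freed when the associated model is deleted.
    llama_lora_adapter_free(adapter);

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}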