From 602128273b54c4148c9de8f16ff152ac04e330ec Mon Sep 17 00:00:00 2001
From: Concedo <39025047+LostRuins@users.noreply.github.com>
Date: Sat, 24 Feb 2024 18:50:20 +0800
Subject: [PATCH] edit warning

---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index f8929ac7d..397d0ddf8 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4970,7 +4970,7 @@ static struct ggml_tensor * llm_build_kqv(
 #if defined(GGML_USE_VULKAN) || defined(GGML_USE_KOMPUTE) || defined(GGML_USE_SYCL)
 #pragma message("TODO: ALiBi support in ggml_soft_max_ext is not implemented for Vulkan, Kompute, and SYCL")
-#pragma message(" Falling back to ggml_alibi(). Will become an error in Mar 2024")
+#pragma message(" Falling back to ggml_alibi(). Will become an error in Mar 2024. But koboldcpp will deal with it.")
 #pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/5488")
         if (hparams.f_max_alibi_bias > 0.0f) {
             kq = ggml_scale(ctx, kq, kq_scale);
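
Note: for context, below is a minimal standalone sketch (not llama.cpp code) of the per-head bias that the ggml_alibi() fallback path adds to the KQ scores before softmax. The head count, max_bias value, and sample scores are illustrative assumptions; only hparams.f_max_alibi_bias and the existence of the ggml_alibi() fallback come from the hunk above.

// alibi_sketch.cpp -- standalone illustration of the ALiBi bias applied
// to attention scores when the ggml_soft_max_ext fused path is unavailable.
#include <cmath>
#include <cstdio>
#include <vector>

// Per-head ALiBi slope, following the standard power-of-two slope scheme:
// the first n_head_log2 heads get slopes m0^(h+1); any remaining heads get
// interleaved slopes derived from m1.
static float alibi_slope(int head, int n_head, float max_bias) {
    const int   n_head_log2 = 1 << (int) std::floor(std::log2((float) n_head));
    const float m0 = std::pow(2.0f, -max_bias          / (float) n_head_log2);
    const float m1 = std::pow(2.0f, -(max_bias / 2.0f) / (float) n_head_log2);
    return head < n_head_log2
        ? std::pow(m0, (float) (head + 1))
        : std::pow(m1, (float) (2 * (head - n_head_log2) + 1));
}

int main() {
    const int   n_head   = 8;    // assumed head count for the demo
    const float max_bias = 8.0f; // plays the role of hparams.f_max_alibi_bias

    // Bias one row of KQ scores: score += slope * key_position.
    // (Adding slope*j rather than -slope*(i-j) differs only by a per-row
    // constant, which the subsequent softmax cancels.)
    std::vector<float> kq_row = {0.1f, 0.4f, -0.2f, 0.7f}; // assumed sample scores
    for (int h = 0; h < n_head; ++h) {
        const float m = alibi_slope(h, n_head, max_bias);
        std::printf("head %d slope %.6f biased:", h, m);
        for (size_t j = 0; j < kq_row.size(); ++j) {
            std::printf(" %.4f", kq_row[j] + m * (float) j);
        }
        std::printf("\n");
    }
    return 0;
}

Compiles with e.g. g++ -std=c++11 alibi_sketch.cpp; nearer keys receive smaller penalties per head, which is the recency bias that the warning says will otherwise become an error once ggml_alibi() is removed.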