mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-11 01:24:36 +00:00)
pulled all Occam's fixes and the kquants are all working now
Commit 9830871d0f (parent 9b6c35b651)
2 changed files with 19 additions and 37 deletions
llama.cpp (12 changed lines)
@@ -1074,18 +1074,6 @@ static void llama_model_load_internal(
         }
     }
 
-#if defined(GGML_USE_CLBLAST)
-    if (file_version == LLAMA_FILE_VERSION_GGJT_V3) {
-        if (hparams.ftype >= LLAMA_FTYPE_MOSTLY_Q2_K && hparams.ftype <= LLAMA_FTYPE_MOSTLY_Q6_K) {
-            if(n_gpu_layers>0)
-            {
-                n_gpu_layers = 0;
-                printf("\n===\nCLBlast cannot offload layers for K-Quants!\nPlease select a q4_0, q4_0, q5_0 or q5_1 format instead!\n=====\n");
-            }
-        }
-    }
-#endif
-
     if (vocab_only) {
         return;
     }
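For context, the deleted block was the guard that, in CLBlast builds, forced n_gpu_layers to 0 for K-quant models (Q2_K through Q6_K) so they ran CPU-only; with Occam's fixes pulled in, that restriction is no longer needed. Below is a minimal standalone sketch of what the removed guard did, using simplified stand-in types and an assumed helper name (adjust_gpu_layers_for_clblast) rather than the real llama.cpp internals:

    // Standalone sketch of the removed CLBlast K-quant guard.
    // llama_ftype_sketch and model_params_sketch are simplified stand-ins,
    // not the real llama.cpp definitions; the enum values are assumptions.
    #include <cstdio>

    enum llama_ftype_sketch {
        FTYPE_Q4_0 = 2,   // assumed value for illustration
        FTYPE_Q2_K = 10,  // assumed value for illustration
        FTYPE_Q6_K = 18,  // assumed value for illustration
    };

    struct model_params_sketch {
        llama_ftype_sketch ftype;
        int n_gpu_layers;
    };

    // Before this commit, K-quant models (Q2_K..Q6_K) had their GPU layer
    // count forced to 0 when running through CLBlast.
    static int adjust_gpu_layers_for_clblast(const model_params_sketch &p) {
        const bool is_k_quant = p.ftype >= FTYPE_Q2_K && p.ftype <= FTYPE_Q6_K;
        if (is_k_quant && p.n_gpu_layers > 0) {
            std::printf("CLBlast cannot offload layers for K-Quants!\n");
            return 0;  // fall back to CPU-only inference
        }
        return p.n_gpu_layers;  // other quant formats keep their offload request
    }

    int main() {
        model_params_sketch q2k = { FTYPE_Q2_K, 35 };
        model_params_sketch q4  = { FTYPE_Q4_0, 35 };
        std::printf("Q2_K layers offloaded: %d\n", adjust_gpu_layers_for_clblast(q2k));
        std::printf("Q4_0 layers offloaded: %d\n", adjust_gpu_layers_for_clblast(q4));
        return 0;
    }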