fixed 70B detection again, attempted a fix for horde issues, fixed lite unicode issue, fixed CMake for CUDA

This commit is contained in:
Concedo 2023-08-09 01:05:00 +08:00
parent 3554080502
commit 793cfd136c
4 changed files with 9 additions and 3 deletions

View file

@@ -1081,7 +1081,7 @@ static void llama_model_load_internal(
// LLaMAv2
// TODO: temporary until GGUF
//patch for llama2 gqa
-    if (model.type == e_model::MODEL_65B && hparams.n_mult == 4096) {
+    if (model.type == e_model::MODEL_65B && (hparams.n_mult >= 4096 && hparams.n_mult != 5504)) {
fprintf(stderr, "%s: Applying KCPP Patch for 70B model, setting GQA to 8\n", __func__);
n_gqa = 8;
}