fixed support for old Falcon models

This commit is contained in:
Concedo 2023-10-18 17:20:44 +08:00
parent 700951dbd4
commit c1ca1de2ac
4 changed files with 255 additions and 4 deletions


@@ -803,6 +803,14 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
}
#endif
//compat for old falcon
if(file_format_meta.fileversion==1 && file_format==FileFormat::GGUF_FALCON)
{
//apply compat fix
printf("\nUsing older tokenizer for Falcon...");
OldBPETokenizerMode = true;
}
llama_model * llamamodel = llama_load_model_from_file(modelname.c_str(), model_params);
llama_ctx_v4 = llama_new_context_with_model(llamamodel, llama_ctx_params);
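
The diff gates a legacy-tokenizer toggle on the GGUF container version before the model is loaded: a version-1 GGUF Falcon file switches OldBPETokenizerMode on so the older BPE tokenizer is used. Below is a minimal standalone C++ sketch of that pattern. The enum, struct, and function names here are simplified assumptions for illustration, not koboldcpp's actual definitions.

    // Minimal sketch of the version-gated compat check (assumed simplified
    // types; only OldBPETokenizerMode and the condition mirror the diff).
    #include <cstdio>

    enum class FileFormat { GGUF_GENERIC, GGUF_FALCON };

    struct FileFormatMeta {
        int fileversion; // GGUF container version reported by the file
    };

    // Global toggle corresponding to OldBPETokenizerMode in the diff.
    static bool OldBPETokenizerMode = false;

    // Enable the legacy BPE tokenizer for version-1 GGUF Falcon files.
    static void apply_falcon_compat(const FileFormatMeta &meta, FileFormat fmt)
    {
        if (meta.fileversion == 1 && fmt == FileFormat::GGUF_FALCON)
        {
            printf("\nUsing older tokenizer for Falcon...");
            OldBPETokenizerMode = true;
        }
    }

    int main()
    {
        FileFormatMeta meta{1};
        apply_falcon_compat(meta, FileFormat::GGUF_FALCON); // prints the compat notice
        return OldBPETokenizerMode ? 0 : 1;
    }

In the real code this check runs just before llama_load_model_from_file, so the tokenizer mode is already set when the model and context are created.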