swa_full used unless both ctx shift and fast forward are disabled

Concedo 2025-05-21 22:47:45 +08:00
commit 9f976e9c65
16 changed files with 1429 additions and 654 deletions
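
In short: the change threads a new swa_full flag through model loading. The flag is forced on whenever fast forward or context shifting is enabled (equivalently, it is off only when both are disabled), applied to the main llama context params, and mirrored onto the draft context used for speculative decoding.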

@@ -583,6 +583,7 @@ static void speculative_decoding_setup(std::string spec_model_filename, const ll
 draft_ctx_params.flash_attn = base_ctx_params.flash_attn;
 draft_ctx_params.type_k = base_ctx_params.type_k;
 draft_ctx_params.type_v = base_ctx_params.type_v;
+draft_ctx_params.swa_full = base_ctx_params.swa_full;
 llama_model * draftmodel = llama_model_load_from_file(spec_model_filename.c_str(), draft_model_params);
 draft_ctx = llama_init_from_model(draftmodel, draft_ctx_params);
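
The first hunk extends speculative decoding setup: the draft context must use the same cache layout as the base context, so swa_full now joins flash_attn and the K/V cache types in the set of mirrored params. A minimal sketch of the pattern, assuming the llama.cpp C API as of this commit (bool flash_attn and swa_full fields in llama_context_params); make_draft_ctx is a hypothetical helper, not the project's actual code:

#include "llama.h"

// Hypothetical helper: create a draft context whose KV cache settings
// mirror those of an existing base context.
static llama_context * make_draft_ctx(const char * draft_path, const llama_context_params & base_ctx_params) {
    llama_model_params draft_model_params = llama_model_default_params();
    llama_context_params draft_ctx_params = llama_context_default_params();
    // Cache-layout settings must match the base context.
    draft_ctx_params.flash_attn = base_ctx_params.flash_attn;
    draft_ctx_params.type_k = base_ctx_params.type_k;
    draft_ctx_params.type_v = base_ctx_params.type_v;
    draft_ctx_params.swa_full = base_ctx_params.swa_full; // the field this commit adds to the set
    llama_model * draftmodel = llama_model_load_from_file(draft_path, draft_model_params);
    if (draftmodel == nullptr) {
        return nullptr; // draft model failed to load
    }
    return llama_init_from_model(draftmodel, draft_ctx_params);
}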
@@ -1923,6 +1924,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
 kcpp_data->use_smartcontext = inputs.use_smartcontext;
 kcpp_data->use_contextshift = inputs.use_contextshift;
 kcpp_data->use_fastforward = inputs.use_fastforward;
+kcpp_data->swa_full = (inputs.use_fastforward || inputs.use_contextshift)?true:false;
 debugmode = inputs.debugmode;
 draft_ctx = nullptr;
 guidance_ctx = nullptr;
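
The second hunk is the core of the change: swa_full is derived from the two cache-reuse features. With a sliding-window-only KV cache, tokens that fall outside the attention window are evicted, so the cache cannot be rewound for context shift or re-matched for fast forward; keeping the full-size SWA cache whenever either feature is enabled avoids that. The ?: on an already-boolean expression is redundant; an equivalent formulation:

// Equivalent to (inputs.use_fastforward || inputs.use_contextshift) ? true : false.
// swa_full is false only when BOTH features are disabled, matching the commit title.
bool swa_full = inputs.use_fastforward || inputs.use_contextshift;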
@@ -2318,6 +2320,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
 }
 llama_ctx_params.flash_attn = kcpp_data->flash_attn;
+llama_ctx_params.swa_full = kcpp_data->swa_full;
 llama_ctx_params.type_k = (inputs.quant_k>1?GGML_TYPE_Q4_0:(inputs.quant_k==1?GGML_TYPE_Q8_0:GGML_TYPE_F16));
 llama_ctx_params.type_v = (inputs.quant_v>1?GGML_TYPE_Q4_0:(inputs.quant_v==1?GGML_TYPE_Q8_0:GGML_TYPE_F16));
 llama_ctx_v4 = llama_init_from_model(llamamodel, llama_ctx_params);
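
The final hunk applies the computed flag when the main context is created, alongside the KV cache quantization choice. The two identical ternaries map quant level 0/1/>1 to F16/Q8_0/Q4_0; the same selection as a small helper, for readability (kv_cache_type is a hypothetical name, not part of the codebase):

#include "ggml.h"

// Map a KV quantization level to a cache tensor type:
// 0 -> F16 (unquantized), 1 -> Q8_0, anything higher -> Q4_0.
static ggml_type kv_cache_type(int quant_level) {
    if (quant_level > 1) { return GGML_TYPE_Q4_0; }
    if (quant_level == 1) { return GGML_TYPE_Q8_0; }
    return GGML_TYPE_F16;
}

With it, the two assignments reduce to llama_ctx_params.type_k = kv_cache_type(inputs.quant_k); and likewise for type_v.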