Mirror of https://github.com/LostRuins/koboldcpp.git
update deprecated apis

parent 4204f111f7
commit c494525b33

3 changed files with 22 additions and 22 deletions
@@ -560,7 +560,7 @@ bool ttstype_load_model(const tts_load_model_inputs inputs)
 }

 std::vector<int> tmp = {1, 2, 3, 4};
-llama_kv_self_clear(ttc_ctx);
+llama_memory_clear(llama_get_memory(ttc_ctx),true);
 auto er = llama_decode(ttc_ctx, llama_batch_get_one(tmp.data(), tmp.size()));
 if(er!=0)
 {
@@ -619,8 +619,8 @@ tts_generation_outputs ttstype_generate(const tts_generation_inputs inputs)
 const std::string sampletext = (custom_speaker_text=="")?process_text("but that is what it is",ttsver):process_text(custom_speaker_text,ttsver);

 // process prompt and generate voice codes
-llama_kv_self_clear(ttc_ctx);
-llama_kv_self_clear(cts_ctx);
+llama_memory_clear(llama_get_memory(ttc_ctx),true);
+llama_memory_clear(llama_get_memory(cts_ctx),true);
 std::vector<llama_token> prompt_inp;
 prompt_init(prompt_inp, ttcvocab);

@@ -818,7 +818,7 @@ tts_generation_outputs ttstype_generate(const tts_generation_inputs inputs)
 }
 }
 guide_tokens.clear();
-llama_kv_self_clear(ttc_ctx);
+llama_memory_clear(llama_get_memory(ttc_ctx),true);
 prompt_init(prompt_inp, ttcvocab);
 next_token_uses_guide_token = true;
 }
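All three hunks apply the same substitution: the deprecated llama_kv_self_clear(ctx) call is replaced by clearing the context's memory object through the newer llama.cpp memory API. A minimal sketch of the pattern, assuming a llama.cpp revision that exposes llama_get_memory and llama_memory_clear (the helper name clear_ctx_cache is illustrative and not part of this commit):

#include "llama.h"

// Illustrative helper (not from this commit): clear all cached state
// for a llama context using the current memory API.
static void clear_ctx_cache(llama_context * ctx) {
    // Old, deprecated form removed by this commit:
    //     llama_kv_self_clear(ctx);
    // New form: fetch the context's memory object and clear it; passing
    // true also clears the underlying data buffers, not just the metadata.
    llama_memory_clear(llama_get_memory(ctx), true);
}

Under that assumption, passing true keeps the behaviour of the old call, which dropped the cached KV data entirely before the TTS contexts are reused for the next request.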